/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/xcr.h>
#include <asm/suspend.h>
#include <asm/debugreg.h>
#include <asm/fpu-internal.h> /* pcntxt_mask */
#include <asm/cpu.h>

#ifdef CONFIG_X86_32
static struct saved_context saved_context;

unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
#else
/* CONFIG_X86_64 */
struct saved_context saved_context;
#endif

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt: structure to store the register contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operation of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
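	/*
	 * Note on the 64-bit path below: in long mode the FS and GS base
	 * addresses are not recoverable from the selector values alone;
	 * they live in MSR_FS_BASE, MSR_GS_BASE and (for the swapgs shadow
	 * copy) MSR_KERNEL_GS_BASE.  That is why the selectors are saved
	 * with inline asm while the bases are saved separately via rdmsrl().
	 */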
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}
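/*
 * fix_processor_context() below re-establishes per-CPU descriptor state.
 * One subtlety worth noting: ltr faults with #GP if the TSS descriptor it
 * references is marked busy, and after resume the descriptor in the GDT
 * may still carry the busy type.  Forcing the type field back to 9
 * (available 64-bit TSS) makes the subsequent ltr in load_TR_desc()
 * legal again.
 */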
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; it should not
				 * be necessary.  But... This is necessary,
				 * because 386 hardware has the concept of
				 * a busy TSS or some similar stupidity.
				 */

#ifdef CONFIG_X86_64
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt: structure to load the register contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
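	/*
	 * The restore order below is significant.  On x86-64, EFER is
	 * written before cr4 and cr3, presumably so that mode bits such
	 * as EFER.NXE are back in place before a cr3 value whose page
	 * tables may use NX bits is loaded; with NXE clear, those bits
	 * would be reserved-bit violations.
	 */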
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * restore XCR0 for xsave capable CPUs.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
}

/* Needed by apm.c */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is already disabled, so it is unnecessary to handle races between
 * cpumask queries and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because:
		 * 1. it is required for the resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation
		 * software prepares the snapshot device during boot time.
		 * So we just call _debug_hotplug_cpu() to restore CPU0 to
		 * its state prior to preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug
		 * debug mode, i.e. CPU0 is offline and user mode
		 * hibernation software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the
		 * snapshot device after boot time, this will offline CPU0
		 * and the user may see a different CPU0 state before and
		 * after accessing the snapshot device.  But hopefully that
		 * is not the case when the user is debugging CPU0 hotplug.
		 * Even if users hit this case, they can easily online CPU0
		 * again.
		 *
		 * To simplify this debug code, we only consider the normal
		 * boot case.  Otherwise we would need to remember CPU0's
		 * state, restore it and resolve race conditions, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is
	 * called earlier and disables cpu hotplug before the BSP online
	 * check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);
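/*
 * How the pieces above fit together: bsp_pm_callback() reports its result
 * through notifier_from_errno(), so a -ENODEV from bsp_check() stops the
 * notifier chain and makes the PM core abort the suspend or hibernation
 * transition while CPU0 is offline.  PM notifiers run in descending
 * priority order, so registering at -INT_MAX means this callback runs
 * after cpu_hotplug_pm_callback() has already disabled CPU hotplug.
 */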