/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/xcr.h>
#include <asm/suspend.h>
#include <asm/debugreg.h>
#include <asm/fpu-internal.h> /* pcntxt_mask */
#include <asm/cpu.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt: structure to store the registers' contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (i.e. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, then
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
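	/*
	 * A descriptor-table limit is the offset of the table's last valid
	 * byte, hence the "- 1" below.
	 */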
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_table(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = __read_cr4_safe();
#ifdef CONFIG_X86_64
	ctxt->cr8 = read_cr8();
#endif
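	/*
	 * rdmsrl_safe() returns 0 when the RDMSR succeeds, so remember
	 * whether MSR_IA32_MISC_ENABLE actually exists on this CPU; the
	 * restore path only writes it back if it was read successfully.
	 */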
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif
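
/*
 * Illustrative sketch only (not in the original file): how a caller is
 * expected to pair the save/restore entry points in this file.  Both
 * functions are declared in <linux/suspend.h>; this helper is never
 * called and exists purely as documentation.
 */
static int __maybe_unused example_suspend_cycle(void)
{
	save_processor_state();		/* snapshot CPU registers */
	/* ... platform firmware suspends and later wakes the CPU ... */
	restore_processor_state();	/* reload the saved registers */
	return 0;
}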

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_table(cpu);
	tss_desc tss;
#endif
	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; it should not
				 * be necessary.  But... it is, because 386
				 * hardware has the concept of a busy TSS or
				 * some similar stupidity.
				 */

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
}
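
/*
 * Background for the TSS type rewrite above: "ltr" marks the referenced
 * TSS descriptor busy (type 0xb), and loading a busy TSS faults with #GP,
 * so the type is reset to "available 64-bit TSS" (0x9) before
 * load_TR_desc() runs again on resume.  The helper below is an
 * illustrative sketch only and is not used by this file.
 */
#ifdef CONFIG_X86_64
static inline bool __maybe_unused tss_desc_is_busy(const tss_desc *tss)
{
	return tss->type == 0xb;	/* busy 64-bit TSS descriptor type */
}
#endif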

/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt: structure to load the registers' contents from
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * Restore XCR0 for xsave-capable CPUs.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle a race condition
 * between the cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
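	/*
	 * cpumask_first() returns the lowest-numbered online CPU, so this
	 * check fails exactly when CPU0 (the boot processor) is offline.
	 */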
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because
		 * 1. it's required for the resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation
		 * software prepares the snapshot device during boot time.
		 * So we just call _debug_hotplug_cpu() to restore CPU0 to
		 * its state prior to the preparation of the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug
		 * debug mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the
		 * snapshot device after boot time, this will offline CPU0
		 * and the user may see a different CPU0 state before and
		 * after accessing the snapshot device.  But hopefully that
		 * is not the case when the user is debugging CPU0 hotplug.
		 * Even if users hit this case, they can easily bring CPU0
		 * back online.
		 *
		 * To simplify this debug code, we only consider the normal
		 * boot case.  Otherwise we would need to remember CPU0's
		 * state, restore to that state and resolve race conditions
		 * etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
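	/*
	 * notifier_from_errno() maps 0 to NOTIFY_OK and a negative errno
	 * to a NOTIFY_STOP_MASK-tagged value, so an -ENODEV from
	 * bsp_check() makes the PM core abort the transition.
	 */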
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback to a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is
	 * called earlier and disables cpu hotplug before the bsp online
	 * check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);
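
/*
 * For reference, the pm_notifier() invocation above expands (per the
 * macro in <linux/suspend.h>) to roughly the following; this is an
 * illustrative sketch, not extra code to run:
 *
 *	static struct notifier_block bsp_pm_callback_nb = {
 *		.notifier_call	= bsp_pm_callback,
 *		.priority	= -INT_MAX,
 *	};
 *	register_pm_notifier(&bsp_pm_callback_nb);
 */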