xref: /linux/arch/x86/kernel/process.c (revision 5148fa52a12fa1b97c730b2fe321f2aad7ea041c)
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
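
/*
 * Illustrative (hypothetical) caller, not part of this file: code that
 * wants to react to idle transitions registers a notifier_block whose
 * callback is invoked with IDLE_START / IDLE_END, e.g.:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			;	(CPU is about to go idle)
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_notify,
 *	};
 *	idle_notifier_register(&my_idle_nb);
 */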
#endif

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}

void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}

void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_CONT "\n");
	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_CONT " %s %s", vendor, product);
	if (board)
		printk(KERN_CONT "/%s", board);
	printk(KERN_CONT "\n");
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
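
/*
 * These two are reached from the prctl() syscall: PR_GET_TSC reports the
 * current mode via get_tsc_mode(), PR_SET_TSC changes it via set_tsc_mode().
 * A minimal (hypothetical) userspace sketch:
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	- subsequent rdtsc raises SIGSEGV
 *	prctl(PR_GET_TSC, &mode);		- mode is now PR_TSC_SIGSEGV
 */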

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
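
/*
 * Note: the per-task I/O bitmap handled above is set up by the ioperm()
 * syscall; e.g. a (hypothetical) userspace call ioperm(0x378, 3, 1)
 * grants access to three consecutive port addresses and sets
 * TIF_IO_BITMAP for the task.
 */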

int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.si = (unsigned long) fn;
	regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.gs = __KERNEL_STACK_CANARY;
#else
	regs.ss = __KERNEL_DS;
#endif

	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
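
/*
 * Illustrative (hypothetical) in-kernel use: spawn a kernel thread that
 * runs my_thread_fn(data).  New code normally goes through the kthread
 * helpers (kthread_create()/kthread_run()), which use this underneath:
 *
 *	kernel_thread(my_thread_fn, data, CLONE_FS | CLONE_FILES | SIGCHLD);
 */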

/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
		const char __user *const __user *argv,
		const char __user *const __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
#endif

	putname(filename);
	return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif

static inline int hlt_use_halt(void)
{
	return 1;
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_touch_nmi();
			local_irq_disable();

			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle.  But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

bool set_pm_idle_to_default(void)
{
	bool ret = !!pm_idle;

	pm_idle = default_idle;

	return ret;
}

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * Whether to use mwait for idle depends on the CPU. For AMD CPUs that
 * support MWAIT it is the wrong choice: family 0x10 and 0x11 CPUs will
 * enter C1 on HLT, and power savings then depend on a clock divisor and
 * the current P-state of the core. If all cores of a processor are in
 * halt state (C1) the processor can enter the C1E (C1 enhanced) state.
 * If mwait is used this will never happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

int mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Use mwait if the idle=mwait boot option is given */
	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
		return 1;

	/*
	 * Any idle= boot option other than idle=mwait means that we must not
	 * use mwait. E.g.: idle=halt, idle=poll or idle=nomwait.
	 */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return 0;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the
 * interrupt pending message MSR. If we detect C1E, then we handle it the
 * same way as C3 power states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	if (need_resched())
		return;

	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => All CPUs support mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		printk(KERN_INFO "using AMD E400 aware idle routine\n");
		pm_idle = amd_e400_idle;
	} else
		pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (pm_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
	} else if (!strcmp(str, "mwait")) {
		boot_option_idle_override = IDLE_FORCE_MWAIT;
		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the idle=halt boot option is given, halt is
		 * forced to be used for CPU idle. In that case the CPU
		 * C2/C3 states won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		pm_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the idle=nomwait boot option is given, mwait will be
		 * disabled for the CPU C2/C3 states. In that case it won't
		 * touch the variable boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
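
/*
 * For reference: with e.g. "idle=nomwait" on the kernel command line the
 * function above is called with str == "nomwait"; mwait_usable() then
 * reports mwait as unusable and select_idle_routine() falls back to
 * another idle routine.
 */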

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}