xref: /linux/arch/x86/kernel/process.c (revision 3b64b1881143ce9e461c211cc81acc72d0cdc476)
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
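
/*
 * Usage sketch (illustrative, not part of this file): a module on x86-64
 * could subscribe to idle transitions via the API above. All "example_*"
 * names are hypothetical.
 *
 *	static int example_idle_event(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		if (action == IDLE_START)
 *			pr_debug("CPU %d entering idle\n", smp_processor_id());
 *		else if (action == IDLE_END)
 *			pr_debug("CPU %d leaving idle\n", smp_processor_id());
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_idle_nb = {
 *		.notifier_call = example_idle_event,
 *	};
 *
 *	idle_notifier_register(&example_idle_nb);
 *
 * The chain is atomic, so the callback runs under the idle path's
 * constraints and must not sleep.
 */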

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

/*
 * This gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(dst, src);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	drop_fpu(me);
}
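
/*
 * Background note (sketch, not from this file): the io_bitmap freed above
 * is normally allocated by sys_ioperm() when a task first requests I/O
 * port access, roughly:
 *
 *	t->io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
 *	memset(t->io_bitmap_ptr, 0xff, IO_BITMAP_BYTES);
 *	set_thread_flag(TIF_IO_BITMAP);
 *
 * so exit_thread() must both free the kernel allocation and re-poison the
 * per-CPU TSS copy with 0xff (all ports denied).
 */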

void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
	       current->pid, current->comm, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version,
	       vendor, product,
	       board ? "/" : "",
	       board ? board : "");
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	drop_init_fpu(tsk);
	/*
	 * Free the FPU state for non-xsave platforms. It gets reallocated
	 * lazily at the first use.
	 */
	if (!use_eager_fpu())
		free_thread_xstate(tsk);
}

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
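
/*
 * Userspace view (sketch, not from this file): get/set_tsc_mode() back the
 * PR_GET_TSC/PR_SET_TSC prctl() commands, so a process can make RDTSC fault:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// RDTSC now raises SIGSEGV
 *
 *	int tsc_mode;
 *	prctl(PR_GET_TSC, &tsc_mode);		// reads back PR_TSC_SIGSEGV
 *
 * Setting CR4.TSD above is what turns RDTSC into a privileged instruction
 * for this task.
 */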

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
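
/*
 * Caller sketch (for context, roughly how the 32/64-bit __switch_to() paths
 * of this era invoke the function above): it only runs when prev or next has
 * one of the "extra work" TIF flags set, along the lines of
 *
 *	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
 *		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 *		__switch_to_xtra(prev_p, next_p, tss);
 *
 * so the common context-switch path pays nothing for these features.
 */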

int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.si = (unsigned long) fn;
	regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.gs = __KERNEL_STACK_CANARY;
#else
	regs.ss = __KERNEL_DS;
#endif

	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
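
/*
 * Usage sketch (illustrative, not part of this file): kernel_thread() starts
 * a kernel-mode thread running fn(arg); "example_worker" is hypothetical:
 *
 *	static int example_worker(void *data)
 *	{
 *		pr_info("worker started with %p\n", data);
 *		return 0;
 *	}
 *
 *	kernel_thread(example_worker, NULL, CLONE_FS | CLONE_FILES);
 *
 * Most code should prefer the kthread_create()/kthread_run() wrappers,
 * which layer proper lifecycle handling on top of this primitive.
 */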

/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
		const char __user *const __user *argv,
		const char __user *const __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
#endif

	putname(filename);
	return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif

static inline int hlt_use_halt(void)
{
	return 1;
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_touch_nmi();
			local_irq_disable();

			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

bool set_pm_idle_to_default(void)
{
	bool ret = !!pm_idle;

	pm_idle = default_idle;

	return ret;
}

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}
528 /* Default MONITOR/MWAIT with no hints, used for default C1 state */
529 static void mwait_idle(void)
530 {
531 	if (!need_resched()) {
532 		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
533 		trace_cpu_idle_rcuidle(1, smp_processor_id());
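		/*
		 * On CPUs flagged with the CLFLUSH_MONITOR workaround
		 * (e.g. Xeon 7400, erratum AAI65), flush the monitored
		 * cacheline first or MWAIT may miss the wakeup write.
		 */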
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
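		/*
		 * Re-check after arming the monitor: a write to the flags
		 * word that happened before __monitor() would not wake the
		 * MWAIT below, so don't sleep if a reschedule is already
		 * pending.
		 */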
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * Whether MWAIT is a good idle choice depends on the CPU. For AMD CPUs
 * that support MWAIT it is usually the wrong one: family 0x10 and 0x11
 * CPUs enter C1 on HLT, and power savings then depend on a clock divisor
 * and the current P-state of the core. If all cores of a processor are
 * in halt state (C1), the processor can enter the C1E (C1 enhanced)
 * state. If mwait is used instead, this will never happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

int mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Use mwait if the idle=mwait boot option is given */
	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
		return 1;

	/*
	 * Any idle= boot option other than idle=mwait means that we must not
	 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
	 */
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return 0;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX carries extended info about MWAIT (ECX bit 0) */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}
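
/*
 * Worked example (illustrative): CPUID leaf 0x05 EDX enumerates the number
 * of MWAIT sub-states per C-state in 4-bit fields, C0 in bits 3:0, C1 in
 * bits 7:4, and so on. With, say, edx = 0x00000020:
 *
 *	(edx & MWAIT_EDX_C1) >> 4 == 2
 *
 * i.e. two C1 sub-states exist, the mask above is non-zero, and
 * mwait_usable() reports that MWAIT can be used for C1 idle.
 */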

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (need_resched())
		return;

	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI cannot interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => All CPUs support mwait
		 */
		pr_info("using mwait in idle threads\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		pm_idle = amd_e400_idle;
	} else
		pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (pm_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		pm_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
	} else if (!strcmp(str, "mwait")) {
		boot_option_idle_override = IDLE_FORCE_MWAIT;
		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option idle=halt is given, halt is
		 * forced to be used for CPU idle. In such a case, the
		 * CPU C2/C3 states won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * boot_option_idle_override.
		 */
		pm_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * When the boot option idle=nomwait is given, mwait is
		 * disabled for the CPU C2/C3 states; pm_idle itself is
		 * left untouched, so the default idle-routine selection
		 * still runs.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
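
/*
 * Example (illustrative): the handler above is wired to the "idle=" kernel
 * command line parameter, so booting with e.g.
 *
 *	idle=poll	- busy-wait in poll_idle() instead of halting
 *	idle=halt	- always use HLT via default_idle()
 *	idle=nomwait	- keep the default routine but forbid MWAIT C-states
 *
 * selects the corresponding branch at early boot.
 */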

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
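
/*
 * Worked example (illustrative): with sp = 0x7fff0000 and a random draw of
 * 5000 (0x1388), the stack becomes
 *
 *	0x7fff0000 - 0x1388 = 0x7ffeec78, masked to 0x7ffeec70
 *
 * i.e. shifted down by up to 8 KiB and then rounded to 16-byte alignment,
 * as the ABI requires.
 */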

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}