/* Diff view of arch/x86/kernel/smpboot.c between commits c8710ad38900153af7a3e6762e99c062cfa46443 (old) and d7c53c9e822a4fefa13a0cae76f3190bfd0d5c11 (new) */
1/*
2 * x86 SMP booting functions
3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
6 * Copyright 2001 Andi Kleen, SuSE Labs.
7 *
8 * Much of the core SMP work is based on previous work by Thomas Radke, to

--- 59 unchanged lines hidden (view full) ---

68#include <asm/uv/uv.h>
69#include <linux/mc146818rtc.h>
70
71#include <asm/smpboot_hooks.h>
72#include <asm/i8259.h>
73
74#ifdef CONFIG_X86_32
75u8 apicid_2_node[MAX_APICID];
1/*
2 * x86 SMP booting functions
3 *
4 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
5 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
6 * Copyright 2001 Andi Kleen, SuSE Labs.
7 *
8 * Much of the core SMP work is based on previous work by Thomas Radke, to

--- 59 unchanged lines hidden (view full) ---

68#include <asm/uv/uv.h>
69#include <linux/mc146818rtc.h>
70
71#include <asm/smpboot_hooks.h>
72#include <asm/i8259.h>
73
74#ifdef CONFIG_X86_32
75u8 apicid_2_node[MAX_APICID];
76static int low_mappings;
77#endif
78
79/* State of each CPU */
80DEFINE_PER_CPU(int, cpu_state) = { 0 };
81
82/* Store all idle threads, this can be reused instead of creating
83* a new thread. Also avoids complicated thread destroy functionality
84* for idle threads.
85*/
86#ifdef CONFIG_HOTPLUG_CPU
87/*
88 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
89 * removed after init for !CONFIG_HOTPLUG_CPU.
90 */
91static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
92#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
93#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
76#endif
77
78/* State of each CPU */
79DEFINE_PER_CPU(int, cpu_state) = { 0 };
80
81/* Store all idle threads, this can be reused instead of creating
82* a new thread. Also avoids complicated thread destroy functionality
83* for idle threads.
84*/
85#ifdef CONFIG_HOTPLUG_CPU
86/*
87 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
88 * removed after init for !CONFIG_HOTPLUG_CPU.
89 */
90static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
91#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
92#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
93
94/*
95 * We need this for trampoline_base protection from concurrent accesses when
96 * off- and onlining cores wildly.
97 */
98static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
99
100void cpu_hotplug_driver_lock()
101{
102 mutex_lock(&x86_cpu_hotplug_driver_mutex);
103}
104
105void cpu_hotplug_driver_unlock()
106{
107 mutex_unlock(&x86_cpu_hotplug_driver_mutex);
108}
109
/* sysfs CPU "probe" hook: not supported on x86, so always report failure. */
ssize_t arch_cpu_probe(const char *buf, size_t count)
{
	return -1;
}
/* sysfs CPU "release" hook: not supported on x86, so always report failure. */
ssize_t arch_cpu_release(const char *buf, size_t count)
{
	return -1;
}
94#else
95static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
96#define get_idle_for_cpu(x) (idle_thread_array[(x)])
97#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
98#endif
99
100/* Number of siblings per CPU package */
101int smp_num_siblings = 1;

--- 174 unchanged lines hidden (view full) ---

276 */
277notrace static void __cpuinit start_secondary(void *unused)
278{
279 /*
280 * Don't put *anything* before cpu_init(), SMP booting is too
281 * fragile that we want to limit the things done here to the
282 * most necessary things.
283 */
112#else
113static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
114#define get_idle_for_cpu(x) (idle_thread_array[(x)])
115#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
116#endif
117
118/* Number of siblings per CPU package */
119int smp_num_siblings = 1;

--- 174 unchanged lines hidden (view full) ---

294 */
295notrace static void __cpuinit start_secondary(void *unused)
296{
297 /*
298 * Don't put *anything* before cpu_init(), SMP booting is too
299 * fragile that we want to limit the things done here to the
300 * most necessary things.
301 */
302
303#ifdef CONFIG_X86_32
304 /*
305 * Switch away from the trampoline page-table
306 *
307 * Do this before cpu_init() because it needs to access per-cpu
308 * data which may not be mapped in the trampoline page-table.
309 */
310 load_cr3(swapper_pg_dir);
311 __flush_tlb_all();
312#endif
313
284 vmi_bringup();
285 cpu_init();
286 preempt_disable();
287 smp_callin();
288
289 /* otherwise gcc will move up smp_processor_id before the cpu_init */
290 barrier();
291 /*
292 * Check TSC synchronization with the BP:
293 */
294 check_tsc_sync_target();
295
296 if (nmi_watchdog == NMI_IO_APIC) {
297 legacy_pic->chip->mask(0);
298 enable_NMI_through_LVT0();
299 legacy_pic->chip->unmask(0);
300 }
301
314 vmi_bringup();
315 cpu_init();
316 preempt_disable();
317 smp_callin();
318
319 /* otherwise gcc will move up smp_processor_id before the cpu_init */
320 barrier();
321 /*
322 * Check TSC synchronization with the BP:
323 */
324 check_tsc_sync_target();
325
326 if (nmi_watchdog == NMI_IO_APIC) {
327 legacy_pic->chip->mask(0);
328 enable_NMI_through_LVT0();
329 legacy_pic->chip->unmask(0);
330 }
331
302#ifdef CONFIG_X86_32
303 while (low_mappings)
304 cpu_relax();
305 __flush_tlb_all();
306#endif
307
308 /* This must be done before setting cpu_online_mask */
309 set_cpu_sibling_map(raw_smp_processor_id());
310 wmb();
311
312 /*
313 * We need to hold call_lock, so there is no inconsistency
314 * between the time smp_call_function() determines number of
315 * IPI recipients, and the time when the determination is made

--- 429 unchanged lines hidden (view full) ---

745 }
746
747 set_idle_for_cpu(cpu, c_idle.idle);
748do_rest:
749 per_cpu(current_task, cpu) = c_idle.idle;
750#ifdef CONFIG_X86_32
751 /* Stack for startup_32 can be just as for start_secondary onwards */
752 irq_ctx_init(cpu);
332 /* This must be done before setting cpu_online_mask */
333 set_cpu_sibling_map(raw_smp_processor_id());
334 wmb();
335
336 /*
337 * We need to hold call_lock, so there is no inconsistency
338 * between the time smp_call_function() determines number of
339 * IPI recipients, and the time when the determination is made

--- 429 unchanged lines hidden (view full) ---

769 }
770
771 set_idle_for_cpu(cpu, c_idle.idle);
772do_rest:
773 per_cpu(current_task, cpu) = c_idle.idle;
774#ifdef CONFIG_X86_32
775 /* Stack for startup_32 can be just as for start_secondary onwards */
776 irq_ctx_init(cpu);
777 initial_page_table = __pa(&trampoline_pg_dir);
753#else
754 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
755 initial_gs = per_cpu_offset(cpu);
756 per_cpu(kernel_stack, cpu) =
757 (unsigned long)task_stack_page(c_idle.idle) -
758 KERNEL_STACK_OFFSET + THREAD_SIZE;
759#endif
760 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);

--- 131 unchanged lines hidden (view full) ---

892 /*
893 * Save current MTRR state in case it was changed since early boot
894 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
895 */
896 mtrr_save_state();
897
898 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
899
778#else
779 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
780 initial_gs = per_cpu_offset(cpu);
781 per_cpu(kernel_stack, cpu) =
782 (unsigned long)task_stack_page(c_idle.idle) -
783 KERNEL_STACK_OFFSET + THREAD_SIZE;
784#endif
785 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);

--- 131 unchanged lines hidden (view full) ---

917 /*
918 * Save current MTRR state in case it was changed since early boot
919 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
920 */
921 mtrr_save_state();
922
923 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
924
900#ifdef CONFIG_X86_32
901 /* init low mem mapping */
902 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
903 min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
904 flush_tlb_all();
905 low_mappings = 1;
906
907 err = do_boot_cpu(apicid, cpu);
908
925 err = do_boot_cpu(apicid, cpu);
926
909 zap_low_mappings(false);
910 low_mappings = 0;
911#else
912 err = do_boot_cpu(apicid, cpu);
913#endif
914 if (err) {
915 pr_debug("do_boot_cpu failed %d\n", err);
916 return -EIO;
917 }
918
919 /*
920 * Check TSC synchronization with the AP (keep irqs disabled
921 * while doing so):

--- 476 unchanged lines hidden ---
927 if (err) {
928 pr_debug("do_boot_cpu failed %d\n", err);
929 return -EIO;
930 }
931
932 /*
933 * Check TSC synchronization with the AP (keep irqs disabled
934 * while doing so):

--- 476 unchanged lines hidden ---