smpboot.c (c95baf12f5077419db01313ab61c2aac007d40cd) smpboot.c (2875fe0561569f82d0e63658ccf0d11ce7da8922)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * SMP initialisation and IPI support
4 * Based on arch/arm64/kernel/smp.c
5 *
6 * Copyright (C) 2012 ARM Ltd.
7 * Copyright (C) 2015 Regents of the University of California
8 * Copyright (C) 2017 SiFive

--- 11 unchanged lines hidden (view full) ---

20#include <linux/percpu.h>
21#include <linux/delay.h>
22#include <linux/err.h>
23#include <linux/irq.h>
24#include <linux/of.h>
25#include <linux/sched/task_stack.h>
26#include <linux/sched/mm.h>
27#include <asm/clint.h>
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * SMP initialisation and IPI support
4 * Based on arch/arm64/kernel/smp.c
5 *
6 * Copyright (C) 2012 ARM Ltd.
7 * Copyright (C) 2015 Regents of the University of California
8 * Copyright (C) 2017 SiFive

--- 11 unchanged lines hidden (view full) ---

20#include <linux/percpu.h>
21#include <linux/delay.h>
22#include <linux/err.h>
23#include <linux/irq.h>
24#include <linux/of.h>
25#include <linux/sched/task_stack.h>
26#include <linux/sched/mm.h>
27#include <asm/clint.h>
28#include <asm/cpu_ops.h>
28#include <asm/irq.h>
29#include <asm/mmu_context.h>
30#include <asm/tlbflush.h>
31#include <asm/sections.h>
32#include <asm/sbi.h>
33#include <asm/smp.h>
34
35#include "head.h"
36
29#include <asm/irq.h>
30#include <asm/mmu_context.h>
31#include <asm/tlbflush.h>
32#include <asm/sections.h>
33#include <asm/sbi.h>
34#include <asm/smp.h>
35
36#include "head.h"
37
37void *__cpu_up_stack_pointer[NR_CPUS];
38void *__cpu_up_task_pointer[NR_CPUS];
39static DECLARE_COMPLETION(cpu_running);
40
/*
 * Early SMP hook run on the boot CPU before secondaries are brought up.
 * Only initialises the CPU topology here; per-CPU prep happens later in
 * smp_prepare_cpus().
 */
41void __init smp_prepare_boot_cpu(void)
42{
43 init_cpu_topology();
44}
45
46void __init smp_prepare_cpus(unsigned int max_cpus)
47{
48 int cpuid;
38static DECLARE_COMPLETION(cpu_running);
39
/*
 * Early SMP hook run on the boot CPU before secondaries are brought up.
 * Only initialises the CPU topology here; per-CPU prep happens later in
 * smp_prepare_cpus().
 */
40void __init smp_prepare_boot_cpu(void)
41{
42 init_cpu_topology();
43}
44
45void __init smp_prepare_cpus(unsigned int max_cpus)
46{
47 int cpuid;
48 int ret;
49
50 /* This covers non-smp usecase mandated by "nosmp" option */
51 if (max_cpus == 0)
52 return;
53
54 for_each_possible_cpu(cpuid) {
55 if (cpuid == smp_processor_id())
56 continue;
49
50 /* This covers non-smp usecase mandated by "nosmp" option */
51 if (max_cpus == 0)
52 return;
53
54 for_each_possible_cpu(cpuid) {
55 if (cpuid == smp_processor_id())
56 continue;
57 if (cpu_ops[cpuid]->cpu_prepare) {
58 ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
59 if (ret)
60 continue;
61 }
57 set_cpu_present(cpuid, true);
58 }
59}
60
61void __init setup_smp(void)
62{
63 struct device_node *dn;
64 int hart;
65 bool found_boot_cpu = false;
66 int cpuid = 1;
67
62 set_cpu_present(cpuid, true);
63 }
64}
65
66void __init setup_smp(void)
67{
68 struct device_node *dn;
69 int hart;
70 bool found_boot_cpu = false;
71 int cpuid = 1;
72
73 cpu_set_ops(0);
74
68 for_each_of_cpu_node(dn) {
69 hart = riscv_of_processor_hartid(dn);
70 if (hart < 0)
71 continue;
72
73 if (hart == cpuid_to_hartid_map(0)) {
74 BUG_ON(found_boot_cpu);
75 found_boot_cpu = 1;

--- 11 unchanged lines hidden (view full) ---

87
88 BUG_ON(!found_boot_cpu);
89
90 if (cpuid > nr_cpu_ids)
91 pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
92 cpuid, nr_cpu_ids);
93
94 for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
75 for_each_of_cpu_node(dn) {
76 hart = riscv_of_processor_hartid(dn);
77 if (hart < 0)
78 continue;
79
80 if (hart == cpuid_to_hartid_map(0)) {
81 BUG_ON(found_boot_cpu);
82 found_boot_cpu = 1;

--- 11 unchanged lines hidden (view full) ---

94
95 BUG_ON(!found_boot_cpu);
96
97 if (cpuid > nr_cpu_ids)
98 pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
99 cpuid, nr_cpu_ids);
100
101 for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
95 if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
102 if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
103 cpu_set_ops(cpuid);
96 set_cpu_possible(cpuid, true);
104 set_cpu_possible(cpuid, true);
105 }
97 }
98}
99
106 }
107}
108
/*
 * Kick the given secondary cpu into the kernel, running @tidle as its
 * idle task, by dispatching through its registered cpu_ops boot method.
 *
 * Returns 0 on success, the cpu_start() error code on failure, or
 * -EOPNOTSUPP when the cpu's ops provide no cpu_start method.
 */
109int start_secondary_cpu(int cpu, struct task_struct *tidle)
110{
	/* cpu_ops[cpu] is populated earlier via cpu_set_ops() in setup_smp(). */
111 if (cpu_ops[cpu]->cpu_start)
112 return cpu_ops[cpu]->cpu_start(cpu, tidle);
113
114 return -EOPNOTSUPP;
115}
116
100int __cpu_up(unsigned int cpu, struct task_struct *tidle)
101{
102 int ret = 0;
117int __cpu_up(unsigned int cpu, struct task_struct *tidle)
118{
119 int ret = 0;
103 int hartid = cpuid_to_hartid_map(cpu);
104 tidle->thread_info.cpu = cpu;
105
120 tidle->thread_info.cpu = cpu;
121
106 /*
107 * On RISC-V systems, all harts boot on their own accord. Our _start
108 * selects the first hart to boot the kernel and causes the remainder
109 * of the harts to spin in a loop waiting for their stack pointer to be
110 * setup by that main hart. Writing __cpu_up_stack_pointer signals to
111 * the spinning harts that they can continue the boot process.
112 */
113 smp_mb();
114 WRITE_ONCE(__cpu_up_stack_pointer[hartid],
115 task_stack_page(tidle) + THREAD_SIZE);
116 WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
117
118 lockdep_assert_held(&cpu_running);
119 wait_for_completion_timeout(&cpu_running,
122 ret = start_secondary_cpu(cpu, tidle);
123 if (!ret) {
124 lockdep_assert_held(&cpu_running);
125 wait_for_completion_timeout(&cpu_running,
120 msecs_to_jiffies(1000));
121
126 msecs_to_jiffies(1000));
127
122 if (!cpu_online(cpu)) {
123 pr_crit("CPU%u: failed to come online\n", cpu);
124 ret = -EIO;
128 if (!cpu_online(cpu)) {
129 pr_crit("CPU%u: failed to come online\n", cpu);
130 ret = -EIO;
131 }
132 } else {
133 pr_crit("CPU%u: failed to start\n", cpu);
125 }
126
127 return ret;
128}
129
/*
 * Arch hook called once all secondary CPUs have been brought online.
 * Intentionally empty on RISC-V — nothing further to finalise here.
 */
130void __init smp_cpus_done(unsigned int max_cpus)
131{
132}

--- 33 unchanged lines hidden ---
134 }
135
136 return ret;
137}
138
/*
 * Arch hook called once all secondary CPUs have been brought online.
 * Intentionally empty on RISC-V — nothing further to finalise here.
 */
139void __init smp_cpus_done(unsigned int max_cpus)
140{
141}

--- 33 unchanged lines hidden ---