// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2025 Ant Group
 * Author: Tiwei Bie <tiwei.btw@antgroup.com>
 *
 * Based on the previous implementation in TT mode
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/cpu.h>
#include <linux/hardirq.h>
#include <linux/smp.h>
#include <linux/smp-internal.h>
#include <init.h>
#include <kern.h>
#include <os.h>
#include <smp.h>

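/* IPI vectors passed to os_send_ipi() and dispatched in ipi_handler() */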
enum {
	UML_IPI_RES = 0,
	UML_IPI_CALL_SINGLE,
	UML_IPI_CALL,
	UML_IPI_STOP,
};

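/* Send a reschedule request to @cpu. */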
void arch_smp_send_reschedule(int cpu)
{
	os_send_ipi(cpu, UML_IPI_RES);
}

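/* Kick @cpu to run a queued single-CPU function call. */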
void arch_send_call_function_single_ipi(int cpu)
{
	os_send_ipi(cpu, UML_IPI_CALL_SINGLE);
}

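/* Kick every CPU in @mask to run its queued SMP function calls. */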
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		os_send_ipi(cpu, UML_IPI_CALL);
}

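/* Ask all other online CPUs to park themselves via UML_IPI_STOP. */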
void smp_send_stop(void)
{
	int cpu, me = smp_processor_id();

	for_each_online_cpu(cpu) {
		if (cpu == me)
			continue;
		os_send_ipi(cpu, UML_IPI_STOP);
	}
}

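/*
 * Common IPI dispatch. If the IPI interrupted a userspace process, the
 * host process backing its mm is interrupted via os_alarm_process() too.
 */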
static void ipi_handler(int vector, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	int cpu = raw_smp_processor_id();

	irq_enter();

	if (current->mm)
		os_alarm_process(current->mm->context.id.pid);

	switch (vector) {
	case UML_IPI_RES:
		inc_irq_stat(irq_resched_count);
		scheduler_ipi();
		break;

	case UML_IPI_CALL_SINGLE:
		inc_irq_stat(irq_call_count);
		generic_smp_call_function_single_interrupt();
		break;

	case UML_IPI_CALL:
		inc_irq_stat(irq_call_count);
		generic_smp_call_function_interrupt();
		break;

	case UML_IPI_STOP:
		set_cpu_online(cpu, false);
		while (1)
			pause();
		break;

	default:
		pr_err("CPU#%d received unknown IPI (vector=%d)!\n", cpu, vector);
		break;
	}

	irq_exit();
	set_irq_regs(old_regs);
}

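/* IPI entry point, called from the os-Linux side with the IPI vector. */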
void uml_ipi_handler(int vector)
{
	struct uml_pt_regs r = { .is_user = 0 };

	preempt_disable();
	ipi_handler(vector, &r);
	preempt_enable();
}

/* AP states used only during CPU startup */
enum {
	UML_CPU_PAUSED = 0,
	UML_CPU_RUNNING,
};

static int cpu_states[NR_CPUS];

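/*
 * First kernel code run in the idle task of a secondary CPU: mark the
 * CPU online, set up its timer, and enter the idle loop.
 */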
static int start_secondary(void *unused)
{
	int err, cpu = raw_smp_processor_id();

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	err = um_setup_timer();
	if (err)
		panic("CPU#%d failed to setup timer, err = %d", cpu, err);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	return 0;
}

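/*
 * Entry point for a newly created host CPU thread. Marks the CPU
 * present, waits until __cpu_up() publishes the idle task for this CPU,
 * then switches to it.
 */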
void uml_start_secondary(void *opaque)
{
	int cpu = raw_smp_processor_id();
	struct mm_struct *mm = &init_mm;
	struct task_struct *idle;

	stack_protections((unsigned long) &cpu_irqstacks[cpu]);
	set_sigstack(&cpu_irqstacks[cpu], THREAD_SIZE);

	set_cpu_present(cpu, true);
	os_futex_wait(&cpu_states[cpu], UML_CPU_PAUSED);

	smp_rmb(); /* paired with smp_wmb() in __cpu_up() */

	idle = cpu_tasks[cpu];
	idle->thread_info.cpu = cpu;

	mmgrab(mm);
	idle->active_mm = mm;

	idle->thread.request.thread.proc = start_secondary;
	idle->thread.request.thread.arg = NULL;

	new_thread(task_stack_page(idle), &idle->thread.switch_buf,
		   new_thread_handler);
	os_start_secondary(opaque, &idle->thread.switch_buf);
}

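/*
 * Start a host thread for each possible secondary CPU and give each up
 * to one second to mark itself present.
 */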
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err, cpu, me = smp_processor_id();
	unsigned long deadline;

	os_init_smp();

	for_each_possible_cpu(cpu) {
		if (cpu == me)
			continue;

		pr_debug("Booting processor %d...\n", cpu);
		err = os_start_cpu_thread(cpu);
		if (err) {
			pr_crit("CPU#%d failed to start cpu thread, err = %d\n",
				cpu, err);
			continue;
		}

		deadline = jiffies + msecs_to_jiffies(1000);
		spin_until_cond(cpu_present(cpu) ||
				time_is_before_jiffies(deadline));

		if (!cpu_present(cpu))
			pr_crit("CPU#%d failed to boot\n", cpu);
	}
}

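/*
 * Publish the idle task for @cpu, release its host thread from the
 * startup futex, and wait for the CPU to come online.
 */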
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	cpu_tasks[cpu] = tidle;
	smp_wmb(); /* paired with smp_rmb() in uml_start_secondary() */
	cpu_states[cpu] = UML_CPU_RUNNING;
	os_futex_wake(&cpu_states[cpu]);
	spin_until_cond(cpu_online(cpu));

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* Set in uml_ncpus_setup */
int uml_ncpus = 1;

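/* Mark the first uml_ncpus CPUs as possible and exclude the rest. */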
void __init prefill_possible_map(void)
{
	int cpu;

	for (cpu = 0; cpu < uml_ncpus; cpu++)
		set_cpu_possible(cpu, true);
	for (; cpu < NR_CPUS; cpu++)
		set_cpu_possible(cpu, false);
}

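/* Parse the "ncpus=" command-line option. */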
static int __init uml_ncpus_setup(char *line, int *add)
{
	*add = 0;

	if (kstrtoint(line, 10, &uml_ncpus)) {
		os_warn("%s: Couldn't parse '%s'\n", __func__, line);
		return -1;
	}

	uml_ncpus = clamp(uml_ncpus, 1, NR_CPUS);

	return 0;
}

__uml_setup("ncpus=", uml_ncpus_setup,
"ncpus=<# of desired CPUs>\n"
" This tells UML how many virtual processors to start. The maximum\n"
" number of supported virtual processors can be obtained by querying\n"
" the CONFIG_NR_CPUS option using --showconfig.\n\n"
);

EXPORT_SYMBOL(uml_curr_cpu);