// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <uapi/asm/hwcap.h>
#include <asm/vector.h>

#include "head.h"

static DECLARE_COMPLETION(cpu_running);

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpuid;
	unsigned int curr_cpuid;

	init_cpu_topology();

	curr_cpuid = smp_processor_id();
	store_cpu_topology(curr_cpuid);
	numa_store_cpu_info(curr_cpuid);
	numa_add_cpu(curr_cpuid);

	/* This covers the non-SMP use case mandated by the "nosmp" option */
	if (max_cpus == 0)
		return;

	for_each_possible_cpu(cpuid) {
		if (cpuid == curr_cpuid)
			continue;
		set_cpu_present(cpuid, true);
		numa_store_cpu_info(cpuid);
	}
}

#ifdef CONFIG_ACPI
static unsigned int cpu_count = 1;

static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const unsigned long end)
{
	unsigned long hart;
	static bool found_boot_cpu;
	struct acpi_madt_rintc *processor = (struct acpi_madt_rintc *)header;

	/*
	 * Each RINTC structure in the MADT carries a flags field. If the
	 * ACPI_MADT_ENABLED bit is clear, the OS must not try to bring up
	 * the CPU that this RINTC describes.
	 */
	if (!(processor->flags & ACPI_MADT_ENABLED))
		return 0;

	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	hart = processor->hart_id;
	if (hart == INVALID_HARTID) {
		pr_warn("Invalid hartid\n");
		return 0;
	}

	if (hart == cpuid_to_hartid_map(0)) {
		BUG_ON(found_boot_cpu);
		found_boot_cpu = true;
		early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count));
		return 0;
	}

	if (cpu_count >= NR_CPUS) {
		pr_warn("NR_CPUS is too small for the number of ACPI tables.\n");
		return 0;
	}

	cpuid_to_hartid_map(cpu_count) = hart;
	early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count));
	cpu_count++;

	return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
	acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0);
}
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif

static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;
	unsigned long hart;
	bool found_boot_cpu = false;
	int cpuid = 1;
	int rc;

	for_each_of_cpu_node(dn) {
		rc = riscv_early_of_processor_hartid(dn, &hart);
		if (rc < 0)
			continue;

		if (hart == cpuid_to_hartid_map(0)) {
			BUG_ON(found_boot_cpu);
			found_boot_cpu = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));
			continue;
		}
		if (cpuid >= NR_CPUS) {
			pr_warn("Invalid cpuid [%d] for hartid [%lu]\n",
				cpuid, hart);
			continue;
		}

		cpuid_to_hartid_map(cpuid) = hart;
		early_map_cpu_to_node(cpuid, of_node_to_nid(dn));
		cpuid++;
	}

	BUG_ON(!found_boot_cpu);

	if (cpuid > nr_cpu_ids)
		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
			cpuid, nr_cpu_ids);
}

void __init setup_smp(void)
{
	int cpuid;

	cpu_set_ops();

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)
		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
			set_cpu_possible(cpuid, true);
}

static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
	if (cpu_ops->cpu_start)
		return cpu_ops->cpu_start(cpu, tidle);

	return -EOPNOTSUPP;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;
	tidle->thread_info.cpu = cpu;

	ret = start_secondary_cpu(cpu, tidle);
	if (!ret) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_crit("CPU%u: failed to start\n", cpu);
	}

	return ret;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * C entry point for a secondary processor.
 */
asmlinkage __visible void smp_callin(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int curr_cpuid = smp_processor_id();

	/* All kernel threads share the same mm context. */
	mmgrab(mm);
	current->active_mm = mm;

	store_cpu_topology(curr_cpuid);
	notify_cpu_starting(curr_cpuid);

	riscv_ipi_enable();

	numa_add_cpu(curr_cpuid);
	set_cpu_online(curr_cpuid, true);

	if (has_vector()) {
		if (riscv_v_setup_vsize())
			elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
	}

	riscv_user_isa_enable();

	/*
	 * Remote cache and TLB flushes are ignored while the CPU is offline,
	 * so flush them both right now just in case.
	 */
	local_flush_icache_all();
	local_flush_tlb_all();
	complete(&cpu_running);
	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
	 */
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
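
/*
 * ---------------------------------------------------------------------------
 * Illustrative sketch, not part of the file above: a minimal example of how a
 * platform backend might satisfy the ->cpu_start hook that
 * start_secondary_cpu() dispatches through. On RISC-V the stock backend lives
 * in arch/riscv/kernel/cpu_ops_sbi.c and uses the SBI HSM extension;
 * example_hsm_hart_start() below is a hypothetical stand-in for that firmware
 * call, and example_cpu_ops is likewise named only for this sketch. Guarded
 * by "#if 0" so it is never compiled.
 * ---------------------------------------------------------------------------
 */
#if 0
static int example_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
	/* Translate the logical CPU id back to the physical hart id. */
	unsigned long hartid = cpuid_to_hartid_map(cpuid);

	/* Physical address where the hart should begin executing. */
	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);

	/*
	 * Ask the firmware to start the hart. A real backend also publishes
	 * tidle's stack and task pointers for the low-level entry code
	 * (omitted here); once the hart enters the kernel it eventually
	 * reaches smp_callin(), which completes cpu_running so that
	 * __cpu_up() can observe the CPU coming online.
	 */
	return example_hsm_hart_start(hartid, boot_addr, 0);
}

static const struct cpu_operations example_cpu_ops = {
	.cpu_start = example_cpu_start,
};
#endif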