// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/pgtable.h>
#include <asm/vendor_extensions.h>

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/*
 * Returns the hart ID of the given device tree node, or -ENODEV if the node
 * isn't an enabled and valid RISC-V hart node.
 */
int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
	int cpu;

	*hart = (unsigned long)of_get_cpu_hwid(node, 0);
	if (*hart == ~0UL) {
		pr_warn("Found CPU without hart ID\n");
		return -ENODEV;
	}

	cpu = riscv_hartid_to_cpuid(*hart);
	if (cpu < 0)
		return cpu;

	if (!cpu_possible(cpu))
		return -ENODEV;

	return 0;
}

int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
	const char *isa;

	if (!of_device_is_compatible(node, "riscv")) {
		pr_warn("Found incompatible CPU\n");
		return -ENODEV;
	}

	*hart = (unsigned long)of_get_cpu_hwid(node, 0);
	if (*hart == ~0UL) {
		pr_warn("Found CPU without hart ID\n");
		return -ENODEV;
	}

	if (!of_device_is_available(node)) {
		pr_info("CPU with hartid=%lu is not available\n", *hart);
		return -ENODEV;
	}

	if (of_property_read_string(node, "riscv,isa-base", &isa))
		goto old_interface;

	if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32i", 5)) {
		pr_warn("CPU with hartid=%lu does not support rv32i", *hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64i", 5)) {
		pr_warn("CPU with hartid=%lu does not support rv64i", *hart);
		return -ENODEV;
	}

	if (!of_property_present(node, "riscv,isa-extensions"))
		return -ENODEV;

	if (of_property_match_string(node, "riscv,isa-extensions", "i") < 0 ||
	    of_property_match_string(node, "riscv,isa-extensions", "m") < 0 ||
	    of_property_match_string(node, "riscv,isa-extensions", "a") < 0) {
		pr_warn("CPU with hartid=%lu does not support ima", *hart);
		return -ENODEV;
	}

	return 0;

old_interface:
	if (!riscv_isa_fallback) {
		pr_warn("CPU with hartid=%lu is invalid: this kernel does not parse \"riscv,isa\"",
			*hart);
		return -ENODEV;
	}

	if (of_property_read_string(node, "riscv,isa", &isa)) {
		pr_warn("CPU with hartid=%lu has no \"riscv,isa-base\" or \"riscv,isa\" property\n",
			*hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7)) {
		pr_warn("CPU with hartid=%lu does not support rv32ima", *hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7)) {
		pr_warn("CPU with hartid=%lu does not support rv64ima", *hart);
		return -ENODEV;
	}

	return 0;
}

/*
 * Find hart ID of the CPU DT node under which given DT node falls.
 *
 * To achieve this, we walk up the DT tree until we find an active
 * RISC-V core (HART) node and extract the cpuid from it.
 */
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv")) {
			*hartid = (unsigned long)of_get_cpu_hwid(node, 0);
			if (*hartid == ~0UL) {
				pr_warn("Found CPU without hart ID\n");
				return -ENODEV;
			}
			return 0;
		}
	}

	return -1;
}

unsigned long __init riscv_get_marchid(void)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	ci->marchid = csr_read(CSR_MARCHID);
#else
	ci->marchid = 0;
#endif
	return ci->marchid;
}

unsigned long __init riscv_get_mvendorid(void)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	ci->mvendorid = csr_read(CSR_MVENDORID);
#else
	ci->mvendorid = 0;
#endif
	return ci->mvendorid;
}

DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);

unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->mvendorid;
}
EXPORT_SYMBOL(riscv_cached_mvendorid);

unsigned long riscv_cached_marchid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->marchid;
}
EXPORT_SYMBOL(riscv_cached_marchid);

unsigned long riscv_cached_mimpid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->mimpid;
}
EXPORT_SYMBOL(riscv_cached_mimpid);

static int riscv_cpuinfo_starting(unsigned int cpu)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	if (!ci->mvendorid)
		ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
	if (!ci->marchid)
		ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
	ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	if (!ci->mvendorid)
		ci->mvendorid = csr_read(CSR_MVENDORID);
	if (!ci->marchid)
		ci->marchid = csr_read(CSR_MARCHID);
	ci->mimpid = csr_read(CSR_MIMPID);
#else
	ci->mvendorid = 0;
	ci->marchid = 0;
	ci->mimpid = 0;
#endif

	return 0;
}

static int __init riscv_cpuinfo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/cpuinfo:starting",
				riscv_cpuinfo_starting, NULL);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}

	return 0;
}
arch_initcall(riscv_cpuinfo_init);

#ifdef CONFIG_PROC_FS

#define ALL_CPUS	-1

static void print_vendor_isa(struct seq_file *f, int cpu)
{
	struct riscv_isavendorinfo *vendor_bitmap;
	struct riscv_isa_vendor_ext_data_list *ext_list;
	const struct riscv_isa_ext_data *ext_data;

	for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) {
		ext_list = riscv_isa_vendor_ext_list[i];
		ext_data = riscv_isa_vendor_ext_list[i]->ext_data;

		if (cpu == ALL_CPUS)
			vendor_bitmap = &ext_list->all_harts_isa_bitmap;
		else
			vendor_bitmap = &ext_list->per_hart_isa_bitmap[cpu];

		for (int j = 0; j < ext_list->ext_data_count; j++) {
			if (!__riscv_isa_extension_available(vendor_bitmap->isa, ext_data[j].id))
				continue;

			seq_printf(f, "_%s", ext_data[j].name);
		}
	}
}

static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap, int cpu)
{
	if (IS_ENABLED(CONFIG_32BIT))
		seq_write(f, "rv32", 4);
	else
		seq_write(f, "rv64", 4);

	for (int i = 0; i < riscv_isa_ext_count; i++) {
		if (!__riscv_isa_extension_available(isa_bitmap, riscv_isa_ext[i].id))
			continue;

		/* Only multi-letter extensions are split by underscores */
		if (strnlen(riscv_isa_ext[i].name, 2) != 1)
			seq_puts(f, "_");

		seq_printf(f, "%s", riscv_isa_ext[i].name);
	}

	print_vendor_isa(f, cpu);

	seq_puts(f, "\n");
}

static void print_mmu(struct seq_file *f)
{
	const char *sv_type;

#ifdef CONFIG_MMU
#if defined(CONFIG_32BIT)
	sv_type = "sv32";
#elif defined(CONFIG_64BIT)
	if (pgtable_l5_enabled)
		sv_type = "sv57";
	else if (pgtable_l4_enabled)
		sv_type = "sv48";
	else
		sv_type = "sv39";
#endif
#else
	sv_type = "none";
#endif /* CONFIG_MMU */
	seq_printf(f, "mmu\t\t: %s\n", sv_type);
}

/*
 * seq_file iterator over online CPUs. The cookie returned is cpu_id + 1 so
 * that CPU 0 is never confused with the NULL end-of-sequence marker; c_show()
 * subtracts the offset again.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == nr_cpu_ids)
		return NULL;

	*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return (void *)(uintptr_t)(1 + *pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

static int c_show(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
	struct device_node *node;
	const char *compat;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));

	/*
	 * For historical raisins, the isa: line is limited to the lowest common
	 * denominator of extensions supported across all harts.
	 * A true list of extensions supported on this hart is printed later in
	 * the hart isa: line.
	 */
	seq_puts(m, "isa\t\t: ");
	print_isa(m, NULL, ALL_CPUS);
	print_mmu(m);

	if (acpi_disabled) {
		node = of_get_cpu_node(cpu_id, NULL);

		if (!of_property_read_string(node, "compatible", &compat) &&
		    strcmp(compat, "riscv"))
			seq_printf(m, "uarch\t\t: %s\n", compat);

		of_node_put(node);
	}

	seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
	seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
	seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);

	/*
	 * Print the ISA extensions specific to this hart, which may show
	 * additional extensions not present across all harts.
	 */
	seq_puts(m, "hart isa\t: ");
	print_isa(m, hart_isa[cpu_id].isa, cpu_id);
	seq_puts(m, "\n");

	return 0;
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};

#endif /* CONFIG_PROC_FS */