// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, which allows userspace to probe for features
 * supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <asm/vendor_extensions/sifive_hwprobe.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>

static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)								\
	do {									\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
			pair->value |= RISCV_HWPROBE_EXT_##ext;			\
		else								\
			missing |= RISCV_HWPROBE_EXT_##ext;			\
	} while (false)
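
		/*
		 * For illustration only: EXT_KEY(ZBB), say, expands to
		 * (roughly)
		 *
		 *	if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_ZBB))
		 *		pair->value |= RISCV_HWPROBE_EXT_ZBB;
		 *	else
		 *		missing |= RISCV_HWPROBE_EXT_ZBB;
		 */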

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZAAMO);
		EXT_KEY(ZACAS);
		EXT_KEY(ZALRSC);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOM);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICNTR);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIHPM);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All of the following extensions must depend on the kernel's
		 * support for V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFBFMIN);
			EXT_KEY(ZVFBFWMA);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFBFMIN);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}

static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif

#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/*
	 * Report whether misaligned vector accesses are supported at all,
	 * even if their speed was never probed.
	 */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif
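
/*
 * Answer a single key/value query for the given set of CPUs. For example
 * (illustrative only, not part of the kernel build), a userspace query such
 * as
 *
 *	struct riscv_hwprobe pair = {
 *		.key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
 *	};
 *	syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
 *
 * lands here with @cpus set to all online CPUs, and pair.value is filled in
 * from hwprobe_misaligned() above.
 */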
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
			pair->value = riscv_cbom_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0:
		hwprobe_isa_vendor_ext_sifive_0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0,
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without
		 * that, there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
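
/*
 * Example (illustrative only): with RISCV_HWPROBE_WHICH_CPUS the caller
 * instead supplies key/value pairs and asks which CPUs satisfy all of them;
 * an empty input mask is shorthand for all online CPUs. A sketch, assuming
 * a glibc-style cpu_set_t is used for the mask:
 *
 *	cpu_set_t cpus;
 *	struct riscv_hwprobe pair = {
 *		.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
 *		.value = RISCV_HWPROBE_EXT_ZBB,
 *	};
 *
 *	CPU_ZERO(&cpus);
 *	syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(cpus),
 *		(unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
 *
 * On return, the mask holds only the online CPUs that support Zbb.
 */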
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0, or if any is -1.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However, if all values are 0
	 * (not populated) or any value returns -1 (varies across CPUs), then
	 * the vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */
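
/*
 * The riscv_hwprobe() syscall entry point. With @flags == 0, fill in @pairs
 * with values that are valid across @cpus (NULL/0 selects all online CPUs).
 * With RISCV_HWPROBE_WHICH_CPUS, narrow @cpus down to the CPUs whose probed
 * values satisfy the given @pairs.
 */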
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}