// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <asm/vendor_extensions/sifive_hwprobe.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>


static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZAAMO);
		EXT_KEY(ZABHA);
		EXT_KEY(ZACAS);
		EXT_KEY(ZALRSC);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOM);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICNTR);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIHPM);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All the following extensions must depend on the kernel
		 * support of V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFBFMIN);
			EXT_KEY(ZVFBFWMA);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFBFMIN);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}

static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif

#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/* Return whether it's supported or not, even if the speed wasn't probed. */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
			pair->value = riscv_cbom_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0:
		hwprobe_isa_vendor_ext_sifive_0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}
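
/*
 * Illustrative sketch only, not part of this file's build: one way userspace
 * might call this syscall, assuming the uapi header <asm/hwprobe.h> and the
 * __NR_riscv_hwprobe number are available on the target system. Passing
 * cpusetsize == 0 with a NULL cpu mask asks about all online CPUs, and
 * flags == 0 takes the value-returning path (hwprobe_get_values() above).
 *
 *	#include <asm/hwprobe.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct riscv_hwprobe pairs[] = {
 *			{ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
 *			{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *		};
 *
 *		// Query all online CPUs; a nonzero return means the call failed.
 *		if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
 *			return 1;
 *
 *		printf("base behavior: %llx, IMA ext0: %llx\n",
 *		       (unsigned long long)pairs[0].value,
 *		       (unsigned long long)pairs[1].value);
 *		return 0;
 *	}
 */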