// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
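/*
 * Illustrative userspace sketch (not part of this file): the syscall number
 * and the key/flag names are the real UAPI ones from <asm/hwprobe.h>, the
 * surrounding glue is hypothetical.
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	// cpusetsize == 0 and cpus == NULL ask about all online CPUs.
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    (pair.value & RISCV_HWPROBE_EXT_ZBB))
 *		; // Zbb may be used on every online hart.
 */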
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/once.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <asm/vendor_extensions/mips_hwprobe.h>
#include <asm/vendor_extensions/sifive_hwprobe.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>

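/*
 * Report the machine vendor/arch/impl ID for @cpus: every hart in the mask
 * must agree on the value, otherwise -1 is reported.
 */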
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
	    pair->key != RISCV_HWPROBE_KEY_MIMPID &&
	    pair->key != RISCV_HWPROBE_KEY_MARCHID)
		goto out;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

out:
	pair->value = id;
}

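/*
 * Compute the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmap for @cpus: an extension bit
 * is only reported if every hart in the mask implements that extension.
 */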
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZAAMO);
		EXT_KEY(ZABHA);
		EXT_KEY(ZACAS);
		EXT_KEY(ZALASR);
		EXT_KEY(ZALRSC);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCLSD);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOM);
		EXT_KEY(ZICBOP);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICNTR);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIHPM);
		EXT_KEY(ZILSD);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All the following extensions must depend on the kernel
		 * support of V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFBFMIN);
			EXT_KEY(ZVFBFWMA);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		/*
		 * These in turn depend on the kernel's scalar FP (F/D)
		 * support.
		 */
		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFBFMIN);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}

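/*
 * Check whether the RISCV_HWPROBE_KEY_IMA_EXT_0 answer for @cpus contains any
 * of the bits in @ext (in practice a single RISCV_HWPROBE_EXT_* bit).
 */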
static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

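/*
 * Scalar misaligned access performance. With CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
 * the probed per-CPU speed is reduced across @cpus (UNKNOWN on disagreement);
 * otherwise the answer comes straight from the Kconfig selection.
 */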
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif

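/*
 * Vector misaligned access performance, mirroring the scalar case above:
 * probed per-CPU values with CONFIG_RISCV_VECTOR_MISALIGNED, a Kconfig-derived
 * answer otherwise.
 */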
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/* Report supported/unsupported even if the speed wasn't probed. */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif

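/*
 * Fill in @pair->value for @pair->key, made consistent across @cpus. Unknown
 * keys are reported back with key == -1 and value == 0 rather than failing
 * the call.
 */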
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
			pair->value = riscv_cbom_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOP))
			pair->value = riscv_cbop_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0:
		hwprobe_isa_vendor_ext_sifive_0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;
	case RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0:
		hwprobe_isa_vendor_ext_mips_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

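/*
 * RISCV_HWPROBE_WHICH_CPUS not set: answer each requested key for the given
 * CPU set (NULL/0 means all online CPUs) and write the pairs back to
 * userspace.
 */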
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without
		 * that there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

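/*
 * RISCV_HWPROBE_WHICH_CPUS: userspace passes fully-populated key/value pairs
 * and a candidate CPU set; on return the set is narrowed to the CPUs whose
 * answers match every pair. Any invalid key clears the whole set.
 */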
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_MMU

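/*
 * Boot-time probes that run asynchronously register themselves here. The
 * counter starts at 1 so that complete_hwprobe_vdso_data()'s own decrement
 * only fires the completion once every registered probe has finished.
 */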
static DECLARE_COMPLETION(boot_probes_done);
static atomic_t pending_boot_probes = ATOMIC_INIT(1);

void riscv_hwprobe_register_async_probe(void)
{
	atomic_inc(&pending_boot_probes);
}

void riscv_hwprobe_complete_async_probe(void)
{
	if (atomic_dec_and_test(&pending_boot_probes))
		complete(&boot_probes_done);
}

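/*
 * Runs once, on the first hwprobe syscall: wait for any outstanding async
 * probes, then cache the "all online CPUs" answers in the vDSO data page so
 * that common queries can be answered without entering the kernel.
 */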
static int complete_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
		wait_for_completion(&boot_probes_done);

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;

	/*
	 * Make sure all the vDSO values are visible before the ready flag is
	 * set. This pairs with the implicit "no speculatively visible
	 * accesses" barrier in the vDSO hwprobe code.
	 */
	smp_wmb();
	avd->ready = true;
	return 0;
}

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;

	/*
	 * Prevent the vDSO cached values from being used, as they're not ready
	 * yet.
	 */
	avd->ready = false;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#else

static int complete_hwprobe_vdso_data(void) { return 0; }

#endif /* CONFIG_MMU */

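/*
 * DO_ONCE_SLEEPABLE() finalizes the vDSO data at most once, on the first
 * hwprobe call, before dispatching on the WHICH_CPUS flag.
 */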
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);

	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}