// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
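
/*
 * Illustrative userspace usage (a sketch, not part of this file), assuming the
 * uapi <asm/hwprobe.h> definitions and __NR_riscv_hwprobe are available:
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	// cpusetsize == 0 and cpus == NULL mean "all online CPUs".
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    (pair.value & RISCV_HWPROBE_IMA_V))
 *		; // every online hart supports the V extension
 */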
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/once.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <asm/vendor_extensions/mips_hwprobe.h>
#include <asm/vendor_extensions/sifive_hwprobe.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>

static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
	    pair->key != RISCV_HWPROBE_KEY_MIMPID &&
	    pair->key != RISCV_HWPROBE_KEY_MARCHID)
		goto out;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

out:
	pair->value = id;
}

static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZAAMO);
		EXT_KEY(ZABHA);
		EXT_KEY(ZACAS);
		EXT_KEY(ZALASR);
		EXT_KEY(ZALRSC);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOM);
		EXT_KEY(ZICBOP);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICNTR);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIHPM);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All the following extensions must depend on the kernel
		 * support of V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFBFMIN);
			EXT_KEY(ZVFBFWMA);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		/*
		 * All the following extensions depend on the kernel support
		 * of F/D.
		 */
		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFBFMIN);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}

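/*
 * Helper for the block-size keys below: true only if every CPU in @cpus
 * reports @ext in the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmap (hwprobe_isa_ext0()
 * has already cleared the bits that any CPU in the mask is missing).
 */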
static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

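/*
 * Scalar misaligned-access performance. When the kernel probes the speed
 * (CONFIG_RISCV_PROBE_UNALIGNED_ACCESS), the per-CPU result is reported and
 * collapses to UNKNOWN if the CPUs in the mask disagree or were never probed;
 * otherwise the answer is determined by the kernel configuration alone.
 */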
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif

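/* Vector misaligned-access performance, mirroring the scalar logic above. */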
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/* Report supported/unsupported even if the speed wasn't probed. */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif

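/*
 * Resolve a single key for the given cpumask. Both syscall flavours and the
 * vDSO data population below funnel through this function.
 */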
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
			pair->value = riscv_cbom_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOP))
			pair->value = riscv_cbop_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0:
		hwprobe_isa_vendor_ext_sifive_0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;
	case RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0:
		hwprobe_isa_vendor_ext_mips_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

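/*
 * Default (flags == 0) syscall flavour: for each requested key, report a
 * value that holds across every CPU in the mask.
 */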
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without one
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

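/*
 * RISCV_HWPROBE_WHICH_CPUS flavour: the pairs come in pre-filled, and the CPU
 * mask is narrowed to the CPUs whose answers match every pair. Any invalid
 * key clears the returned mask entirely.
 */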
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_MMU

static DECLARE_COMPLETION(boot_probes_done);
static atomic_t pending_boot_probes = ATOMIC_INIT(1);

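/*
 * Accounting for boot-time probes that complete asynchronously. The count
 * starts at 1; complete_hwprobe_vdso_data() drops that initial reference and
 * only waits if registered probes are still outstanding.
 */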
void riscv_hwprobe_register_async_probe(void)
{
	atomic_inc(&pending_boot_probes);
}

void riscv_hwprobe_complete_async_probe(void)
{
	if (atomic_dec_and_test(&pending_boot_probes))
		complete(&boot_probes_done);
}

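/*
 * Runs once, on the first hwprobe syscall (via DO_ONCE_SLEEPABLE() in
 * do_riscv_hwprobe()), after all asynchronous boot probes have finished.
 */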
static int complete_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
		wait_for_completion(&boot_probes_done);

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;

	/*
	 * Make sure all the vDSO values are visible before avd->ready is set.
	 * This pairs with the implicit "no speculatively visible accesses"
	 * barrier in the vDSO hwprobe code.
	 */
	smp_wmb();
	avd->ready = true;
	return 0;
}

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;

	/*
	 * Prevent the vDSO cached values from being used, as they're not ready
	 * yet.
	 */
	avd->ready = false;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#else

static int complete_hwprobe_vdso_data(void) { return 0; }

#endif /* CONFIG_MMU */

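/*
 * Syscall backend: finalize the vDSO data on first use, then dispatch on
 * flags between the "which values" and "which CPUs" flavours.
 */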
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);

	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}