// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/once.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <asm/vendor_extensions/mips_hwprobe.h>
#include <asm/vendor_extensions/sifive_hwprobe.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>

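/*
 * Report the common mvendorid/marchid/mimpid value for the CPUs in @cpus,
 * or -1 if the value differs between any of them.
 */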
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
	    pair->key != RISCV_HWPROBE_KEY_MIMPID &&
	    pair->key != RISCV_HWPROBE_KEY_MARCHID)
		goto out;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

out:
	pair->value = id;
}

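/*
 * Compute the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmap for @cpus: an extension is
 * reported only if every CPU in the mask has it.
 */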
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZAAMO);
		EXT_KEY(ZABHA);
		EXT_KEY(ZACAS);
		EXT_KEY(ZALRSC);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOM);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICNTR);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIHPM);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All the following extensions must depend on the kernel
		 * support of V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFBFMIN);
			EXT_KEY(ZVFBFWMA);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		/*
		 * Like V above, the following extensions depend on the
		 * kernel's F/D support.
		 */
		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFBFMIN);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}

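/* Convenience helper: true if every CPU in @cpus reports extension @ext. */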
static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

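/*
 * Resolve RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF. When unaligned access
 * probing is enabled, the per-CPU measurement is used and UNKNOWN is returned
 * if the CPUs in the mask disagree or the mask is empty. Otherwise the answer
 * is fixed by the Kconfig choice.
 */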
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif

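/*
 * Resolve RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF the same way: use the
 * per-CPU probe result when CONFIG_RISCV_VECTOR_MISALIGNED is set, otherwise
 * fall back to the Kconfig selection.
 */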
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/*
	 * Report whether misaligned vector accesses are supported, even if
	 * the speed wasn't probed.
	 */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif

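/*
 * Answer a single key for the given CPU mask. Unknown keys are reported back
 * with key == -1 and value == 0 rather than failing the whole call.
 */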
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
			pair->value = riscv_cbom_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0:
		hwprobe_isa_vendor_ext_sifive_0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;
	case RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0:
		hwprobe_isa_vendor_ext_mips_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

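/*
 * Default (no flags) operation: for each element of @pairs, read the key from
 * userspace, resolve it against the requested CPU mask, and write the
 * key/value back.
 */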
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; otherwise
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

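/*
 * RISCV_HWPROBE_WHICH_CPUS operation: the key/value pairs are inputs, and the
 * CPU mask is narrowed to those CPUs that satisfy every requested pair. Any
 * invalid key clears the whole mask and flags that element with key == -1.
 */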
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_MMU

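/*
 * Asynchronous boot-time probes may still be running when userspace first
 * calls hwprobe. pending_boot_probes starts at 1 so that
 * complete_hwprobe_vdso_data() only has to wait when at least one registered
 * asynchronous probe has not yet completed.
 */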
static DECLARE_COMPLETION(boot_probes_done);
static atomic_t pending_boot_probes = ATOMIC_INIT(1);

void riscv_hwprobe_register_async_probe(void)
{
	atomic_inc(&pending_boot_probes);
}

void riscv_hwprobe_complete_async_probe(void)
{
	if (atomic_dec_and_test(&pending_boot_probes))
		complete(&boot_probes_done);
}

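/*
 * Wait for any outstanding asynchronous probes, then fill the vDSO data page
 * with the "all online CPUs" answers so that common queries can be serviced
 * without a syscall.
 */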
static int complete_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
		wait_for_completion(&boot_probes_done);

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;

	/*
	 * Make sure all the vDSO values are visible before we look at them.
	 * This pairs with the implicit "no speculatively visible accesses"
	 * barrier in the vDSO hwprobe code.
	 */
	smp_wmb();
	avd->ready = true;
	return 0;
}

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;

	/*
	 * Prevent the vDSO cached values from being used, as they're not ready
	 * yet.
	 */
	avd->ready = false;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#else

static int complete_hwprobe_vdso_data(void) { return 0; }

#endif /* CONFIG_MMU */

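/*
 * Common syscall entry point. The vDSO data is completed exactly once, on the
 * first call, before dispatching on the flags.
 */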
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);

	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

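/*
 * sys_riscv_hwprobe(): @pairs points to @pair_count key/value elements,
 * @cpus/@cpusetsize describe the CPU mask (0/NULL means all online CPUs for
 * the default operation), and @flags selects the operation. See
 * Documentation/arch/riscv/hwprobe.rst.
 */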
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}