xref: /linux/arch/riscv/kernel/sys_hwprobe.c (revision 5d15d2ad36b0f7afab83ca9fc8a2a6e60cbe54c4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * The hwprobe interface, for allowing userspace to probe to see which features
4  * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
5  * more details.
6  */
7 #include <linux/syscalls.h>
8 #include <linux/completion.h>
9 #include <linux/atomic.h>
10 #include <linux/once.h>
11 #include <asm/cacheflush.h>
12 #include <asm/cpufeature.h>
13 #include <asm/hwprobe.h>
14 #include <asm/processor.h>
15 #include <asm/delay.h>
16 #include <asm/sbi.h>
17 #include <asm/switch_to.h>
18 #include <asm/uaccess.h>
19 #include <asm/unistd.h>
20 #include <asm/vector.h>
21 #include <asm/vendor_extensions/mips_hwprobe.h>
22 #include <asm/vendor_extensions/sifive_hwprobe.h>
23 #include <asm/vendor_extensions/thead_hwprobe.h>
24 #include <vdso/vsyscall.h>
25 
26 
27 static void hwprobe_arch_id(struct riscv_hwprobe *pair,
28 			    const struct cpumask *cpus)
29 {
30 	u64 id = -1ULL;
31 	bool first = true;
32 	int cpu;
33 
34 	for_each_cpu(cpu, cpus) {
35 		u64 cpu_id;
36 
37 		switch (pair->key) {
38 		case RISCV_HWPROBE_KEY_MVENDORID:
39 			cpu_id = riscv_cached_mvendorid(cpu);
40 			break;
41 		case RISCV_HWPROBE_KEY_MIMPID:
42 			cpu_id = riscv_cached_mimpid(cpu);
43 			break;
44 		case RISCV_HWPROBE_KEY_MARCHID:
45 			cpu_id = riscv_cached_marchid(cpu);
46 			break;
47 		}
48 
49 		if (first) {
50 			id = cpu_id;
51 			first = false;
52 		}
53 
54 		/*
55 		 * If there's a mismatch for the given set, return -1 in the
56 		 * value.
57 		 */
58 		if (id != cpu_id) {
59 			id = -1ULL;
60 			break;
61 		}
62 	}
63 
64 	pair->value = id;
65 }
66 
67 static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
68 			     const struct cpumask *cpus)
69 {
70 	int cpu;
71 	u64 missing = 0;
72 
73 	pair->value = 0;
74 	if (has_fpu())
75 		pair->value |= RISCV_HWPROBE_IMA_FD;
76 
77 	if (riscv_isa_extension_available(NULL, c))
78 		pair->value |= RISCV_HWPROBE_IMA_C;
79 
80 	if (has_vector() && riscv_isa_extension_available(NULL, v))
81 		pair->value |= RISCV_HWPROBE_IMA_V;
82 
83 	/*
84 	 * Loop through and record extensions that 1) anyone has, and 2) anyone
85 	 * doesn't have.
86 	 */
87 	for_each_cpu(cpu, cpus) {
88 		struct riscv_isainfo *isainfo = &hart_isa[cpu];
89 
90 #define EXT_KEY(ext)									\
91 	do {										\
92 		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
93 			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
94 		else									\
95 			missing |= RISCV_HWPROBE_EXT_##ext;				\
96 	} while (false)
97 
98 		/*
99 		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
100 		 * regardless of the kernel's configuration, as no other checks, besides
101 		 * presence in the hart_isa bitmap, are made.
102 		 */
103 		EXT_KEY(ZAAMO);
104 		EXT_KEY(ZABHA);
105 		EXT_KEY(ZACAS);
106 		EXT_KEY(ZALRSC);
107 		EXT_KEY(ZAWRS);
108 		EXT_KEY(ZBA);
109 		EXT_KEY(ZBB);
110 		EXT_KEY(ZBC);
111 		EXT_KEY(ZBKB);
112 		EXT_KEY(ZBKC);
113 		EXT_KEY(ZBKX);
114 		EXT_KEY(ZBS);
115 		EXT_KEY(ZCA);
116 		EXT_KEY(ZCB);
117 		EXT_KEY(ZCMOP);
118 		EXT_KEY(ZICBOM);
119 		EXT_KEY(ZICBOZ);
120 		EXT_KEY(ZICNTR);
121 		EXT_KEY(ZICOND);
122 		EXT_KEY(ZIHINTNTL);
123 		EXT_KEY(ZIHINTPAUSE);
124 		EXT_KEY(ZIHPM);
125 		EXT_KEY(ZIMOP);
126 		EXT_KEY(ZKND);
127 		EXT_KEY(ZKNE);
128 		EXT_KEY(ZKNH);
129 		EXT_KEY(ZKSED);
130 		EXT_KEY(ZKSH);
131 		EXT_KEY(ZKT);
132 		EXT_KEY(ZTSO);
133 
134 		/*
135 		 * All the following extensions must depend on the kernel
136 		 * support of V.
137 		 */
138 		if (has_vector()) {
139 			EXT_KEY(ZVBB);
140 			EXT_KEY(ZVBC);
141 			EXT_KEY(ZVE32F);
142 			EXT_KEY(ZVE32X);
143 			EXT_KEY(ZVE64D);
144 			EXT_KEY(ZVE64F);
145 			EXT_KEY(ZVE64X);
146 			EXT_KEY(ZVFBFMIN);
147 			EXT_KEY(ZVFBFWMA);
148 			EXT_KEY(ZVFH);
149 			EXT_KEY(ZVFHMIN);
150 			EXT_KEY(ZVKB);
151 			EXT_KEY(ZVKG);
152 			EXT_KEY(ZVKNED);
153 			EXT_KEY(ZVKNHA);
154 			EXT_KEY(ZVKNHB);
155 			EXT_KEY(ZVKSED);
156 			EXT_KEY(ZVKSH);
157 			EXT_KEY(ZVKT);
158 		}
159 
160 		EXT_KEY(ZCD);
161 		EXT_KEY(ZCF);
162 		EXT_KEY(ZFA);
163 		EXT_KEY(ZFBFMIN);
164 		EXT_KEY(ZFH);
165 		EXT_KEY(ZFHMIN);
166 
167 		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
168 			EXT_KEY(SUPM);
169 #undef EXT_KEY
170 	}
171 
172 	/* Now turn off reporting features if any CPU is missing it. */
173 	pair->value &= ~missing;
174 }
175 
176 static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
177 {
178 	struct riscv_hwprobe pair;
179 
180 	hwprobe_isa_ext0(&pair, cpus);
181 	return (pair.value & ext);
182 }
183 
184 #if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
185 static u64 hwprobe_misaligned(const struct cpumask *cpus)
186 {
187 	int cpu;
188 	u64 perf = -1ULL;
189 
190 	for_each_cpu(cpu, cpus) {
191 		int this_perf = per_cpu(misaligned_access_speed, cpu);
192 
193 		if (perf == -1ULL)
194 			perf = this_perf;
195 
196 		if (perf != this_perf) {
197 			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
198 			break;
199 		}
200 	}
201 
202 	if (perf == -1ULL)
203 		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
204 
205 	return perf;
206 }
207 #else
208 static u64 hwprobe_misaligned(const struct cpumask *cpus)
209 {
210 	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
211 		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;
212 
213 	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
214 		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
215 
216 	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
217 }
218 #endif
219 
220 #ifdef CONFIG_RISCV_VECTOR_MISALIGNED
221 static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
222 {
223 	int cpu;
224 	u64 perf = -1ULL;
225 
226 	/* Return if supported or not even if speed wasn't probed */
227 	for_each_cpu(cpu, cpus) {
228 		int this_perf = per_cpu(vector_misaligned_access, cpu);
229 
230 		if (perf == -1ULL)
231 			perf = this_perf;
232 
233 		if (perf != this_perf) {
234 			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
235 			break;
236 		}
237 	}
238 
239 	if (perf == -1ULL)
240 		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
241 
242 	return perf;
243 }
244 #else
245 static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
246 {
247 	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
248 		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
249 
250 	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
251 		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;
252 
253 	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
254 }
255 #endif
256 
/*
 * Answer a single hwprobe query: dispatch on pair->key and fill in
 * pair->value with the result that holds for every CPU in @cpus.  Unknown
 * keys are not an error; they are reported back with key == -1.
 */
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	/* The three machine-identity CSR keys share one resolver. */
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	/* CPUPERF_0 is the deprecated alias of MISALIGNED_SCALAR_PERF. */
	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	/* Block sizes are only meaningful if the extension is present. */
	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
			pair->value = riscv_cbom_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	/* Vendor extension spaces are delegated to per-vendor helpers. */
	case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0:
		hwprobe_isa_vendor_ext_sifive_0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;
	case RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0:
		hwprobe_isa_vendor_ext_mips_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
329 
330 static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
331 			      size_t pair_count, size_t cpusetsize,
332 			      unsigned long __user *cpus_user,
333 			      unsigned int flags)
334 {
335 	size_t out;
336 	int ret;
337 	cpumask_t cpus;
338 
339 	/* Check the reserved flags. */
340 	if (flags != 0)
341 		return -EINVAL;
342 
343 	/*
344 	 * The interface supports taking in a CPU mask, and returns values that
345 	 * are consistent across that mask. Allow userspace to specify NULL and
346 	 * 0 as a shortcut to all online CPUs.
347 	 */
348 	cpumask_clear(&cpus);
349 	if (!cpusetsize && !cpus_user) {
350 		cpumask_copy(&cpus, cpu_online_mask);
351 	} else {
352 		if (cpusetsize > cpumask_size())
353 			cpusetsize = cpumask_size();
354 
355 		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
356 		if (ret)
357 			return -EFAULT;
358 
359 		/*
360 		 * Userspace must provide at least one online CPU, without that
361 		 * there's no way to define what is supported.
362 		 */
363 		cpumask_and(&cpus, &cpus, cpu_online_mask);
364 		if (cpumask_empty(&cpus))
365 			return -EINVAL;
366 	}
367 
368 	for (out = 0; out < pair_count; out++, pairs++) {
369 		struct riscv_hwprobe pair;
370 
371 		if (get_user(pair.key, &pairs->key))
372 			return -EFAULT;
373 
374 		pair.value = 0;
375 		hwprobe_one_pair(&pair, &cpus);
376 		ret = put_user(pair.key, &pairs->key);
377 		if (ret == 0)
378 			ret = put_user(pair.value, &pairs->value);
379 
380 		if (ret)
381 			return -EFAULT;
382 	}
383 
384 	return 0;
385 }
386 
387 static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
388 			    size_t pair_count, size_t cpusetsize,
389 			    unsigned long __user *cpus_user,
390 			    unsigned int flags)
391 {
392 	cpumask_t cpus, one_cpu;
393 	bool clear_all = false;
394 	size_t i;
395 	int ret;
396 
397 	if (flags != RISCV_HWPROBE_WHICH_CPUS)
398 		return -EINVAL;
399 
400 	if (!cpusetsize || !cpus_user)
401 		return -EINVAL;
402 
403 	if (cpusetsize > cpumask_size())
404 		cpusetsize = cpumask_size();
405 
406 	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
407 	if (ret)
408 		return -EFAULT;
409 
410 	if (cpumask_empty(&cpus))
411 		cpumask_copy(&cpus, cpu_online_mask);
412 
413 	cpumask_and(&cpus, &cpus, cpu_online_mask);
414 
415 	cpumask_clear(&one_cpu);
416 
417 	for (i = 0; i < pair_count; i++) {
418 		struct riscv_hwprobe pair, tmp;
419 		int cpu;
420 
421 		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
422 		if (ret)
423 			return -EFAULT;
424 
425 		if (!riscv_hwprobe_key_is_valid(pair.key)) {
426 			clear_all = true;
427 			pair = (struct riscv_hwprobe){ .key = -1, };
428 			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
429 			if (ret)
430 				return -EFAULT;
431 		}
432 
433 		if (clear_all)
434 			continue;
435 
436 		tmp = (struct riscv_hwprobe){ .key = pair.key, };
437 
438 		for_each_cpu(cpu, &cpus) {
439 			cpumask_set_cpu(cpu, &one_cpu);
440 
441 			hwprobe_one_pair(&tmp, &one_cpu);
442 
443 			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
444 				cpumask_clear_cpu(cpu, &cpus);
445 
446 			cpumask_clear_cpu(cpu, &one_cpu);
447 		}
448 	}
449 
450 	if (clear_all)
451 		cpumask_clear(&cpus);
452 
453 	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
454 	if (ret)
455 		return -EFAULT;
456 
457 	return 0;
458 }
459 
460 #ifdef CONFIG_MMU
461 
/* Signalled when the last outstanding asynchronous boot probe finishes. */
static DECLARE_COMPLETION(boot_probes_done);
/*
 * Count of in-flight async probes.  Starts at 1 so the count cannot reach
 * zero before complete_hwprobe_vdso_data() drops the initial reference.
 */
static atomic_t pending_boot_probes = ATOMIC_INIT(1);

/* Register one more asynchronous boot-time probe that is still running. */
void riscv_hwprobe_register_async_probe(void)
{
	atomic_inc(&pending_boot_probes);
}

/*
 * Mark one asynchronous probe as finished; the caller that drops the count
 * to zero wakes any waiter in complete_hwprobe_vdso_data().
 */
void riscv_hwprobe_complete_async_probe(void)
{
	if (atomic_dec_and_test(&pending_boot_probes))
		complete(&boot_probes_done);
}
475 
/*
 * Populate the vDSO's cached hwprobe answers for the "all online CPUs"
 * case, then publish them by setting avd->ready.  Runs once (via
 * DO_ONCE_SLEEPABLE from do_riscv_hwprobe()) after all asynchronous boot
 * probes have completed.
 */
static int complete_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Drop the initial reference taken at ATOMIC_INIT(1); if async
	 * probes are still outstanding, sleep until the last one signals
	 * boot_probes_done.
	 */
	if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
		wait_for_completion(&boot_probes_done);

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		/* Every key up to MAX_KEY should be recognized. */
		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;

	/*
	 * Make sure all the vDSO values are visible before avd->ready is.
	 * This pairs with the implicit "no speculatively visible accesses"
	 * barrier in the vDSO hwprobe code.
	 */
	smp_wmb();
	avd->ready = true;
	return 0;
}
523 
/* Boot-time init: mark the vDSO hwprobe cache as not yet usable. */
static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_arch_data *avd = vdso_k_arch_data;

	/*
	 * Prevent the vDSO cached values from being used, as they're not ready
	 * yet.
	 */
	avd->ready = false;
	return 0;
}

/* _sync level so this runs before later initcalls might use the vDSO data. */
arch_initcall_sync(init_hwprobe_vdso_data);
537 
538 #else
539 
/* !CONFIG_MMU: there is no vDSO data page, so nothing to populate. */
static int complete_hwprobe_vdso_data(void) { return 0; }
541 
542 #endif /* CONFIG_MMU */
543 
/*
 * Common syscall backend: lazily finish populating the vDSO data exactly
 * once, then dispatch on WHICH_CPUS to either the cpu-narrowing query or
 * the plain value-returning query.
 */
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);

	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				cpus_user, flags);
}
558 
/*
 * riscv_hwprobe(2) entry point; see Documentation/arch/riscv/hwprobe.rst
 * for the userspace contract.  Thin wrapper around do_riscv_hwprobe().
 */
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}
566