// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
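
/*
 * Illustrative sketch of the userspace side (not part of this file's
 * build): query the base behavior and the IMA extension bitmask across
 * all online CPUs, using the NULL/0 cpumask shortcut handled below in
 * hwprobe_get_values(). Error handling is elided.
 *
 *	struct riscv_hwprobe pairs[] = {
 *		{ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
 *		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *	};
 *	long ret = syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);
 *
 *	if (!ret && (pairs[1].value & RISCV_HWPROBE_IMA_V))
 *		... vector routines are usable on every online CPU ...
 */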
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>
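/*
 * Resolve one of the ID-register keys (mvendorid/marchid/mimpid) for a
 * set of CPUs: pair->value becomes the value shared by every CPU in
 * @cpus, or -1 if any of them disagree. Callers only pass the three
 * keys handled in the switch below.
 */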
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

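/*
 * Compute the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmask for @cpus. An
 * extension bit is set only if every CPU in the set supports it: bits
 * any CPU lacks are collected in @missing and cleared at the end.
 */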
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZACAS);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICNTR);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIHPM);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All of the following extensions depend on the kernel
		 * supporting V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFBFMIN);
			EXT_KEY(ZVFBFWMA);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFBFMIN);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now stop reporting any feature that some CPU is missing. */
	pair->value &= ~missing;
}

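/*
 * Helper for the compound keys below: true if every CPU in @cpus
 * supports extension @ext (a RISCV_HWPROBE_EXT_* bit).
 */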
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

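/*
 * Misaligned scalar access performance: when the kernel probed it at
 * boot, report the measured per-CPU speed (or UNKNOWN if the CPUs in
 * the set disagree); otherwise derive the answer from the Kconfig
 * selection.
 */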
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif

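/*
 * Same idea for misaligned vector accesses: prefer the per-CPU probed
 * value, falling back to the Kconfig selection when probing is
 * compiled out.
 */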
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/* Report whether it's supported or not, even if the speed wasn't probed. */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif

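/*
 * Resolve a single key/value pair for @cpus, dispatching to the
 * helpers above. Unknown keys don't fail the call; they're reported
 * back with key = -1.
 */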
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

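/*
 * The default (flags == 0) mode: the cpumask is the input and the
 * values are the output, answering each key consistently across the
 * requested set of CPUs.
 */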
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without
		 * one there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

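/*
 * The RISCV_HWPROBE_WHICH_CPUS mode inverts the query: the key/value
 * pairs are the input, and the cpumask is narrowed on return to the
 * CPUs whose answers match every pair. An unrecognized key matches no
 * CPU, so it empties the resulting mask.
 */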
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		/*
		 * An unrecognized key can't match on any CPU: report it
		 * back with key = -1 and arrange to return an empty mask.
		 */
		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		/*
		 * Probe each remaining CPU individually; drop any CPU
		 * whose value for this key differs from the requested one.
		 */
		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

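/*
 * Dispatch on the WHICH_CPUS flag: with it set, the pairs are inputs
 * and the cpumask is the output; otherwise the reverse.
 */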
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

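/* The vDSO, and with it this cached data, is only built on MMU kernels. */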
static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_time_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or if any is -1.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}