// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
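
/*
 * Illustrative userspace usage (a minimal sketch, not part of this file; it
 * assumes the uapi <asm/hwprobe.h> definitions and a libc that exposes
 * syscall()):
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	// cpusetsize == 0 and cpus == NULL is shorthand for "all online CPUs".
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    (pair.value & RISCV_HWPROBE_IMA_V)) {
 *		// The V extension is usable on every online CPU.
 *	}
 */
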
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <vdso/vsyscall.h>

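/*
 * Report the m{vendor,arch,imp}id named by pair->key that is common to every
 * CPU in @cpus. If the value differs between CPUs in the set (or the set is
 * empty), report -1 instead.
 */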
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

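/*
 * Fill in pair->value with the RISCV_HWPROBE_IMA_* and RISCV_HWPROBE_EXT_*
 * bits for the extensions that every CPU in @cpus supports; any bit that at
 * least one CPU in the set lacks is cleared before returning.
 */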
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZACAS);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All of the following extensions depend on the kernel's
		 * support for V, so only expose them when that is present.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFBFMIN);
			EXT_KEY(ZVFBFWMA);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFBFMIN);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now stop reporting features that any CPU in the set is missing. */
	pair->value &= ~missing;
}

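/*
 * Helper: compute the IMA_EXT_0 value for @cpus and check whether the
 * extension bit @ext is set in it.
 */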
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

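/*
 * Scalar misaligned access performance. With
 * CONFIG_RISCV_PROBE_UNALIGNED_ACCESS, the per-CPU probed speed is reported
 * when it is consistent across @cpus (UNKNOWN otherwise); without probing,
 * the answer is derived purely from the kernel configuration.
 */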
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif

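/*
 * Vector misaligned access performance, mirroring the scalar case above:
 * report the probed per-CPU value when it is consistent across @cpus, or fall
 * back to a configuration-derived answer when probing is not built in.
 */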
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/*
	 * Report whether misaligned vector accesses are supported at all,
	 * even if their speed was never probed.
	 */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif

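/*
 * Resolve a single key/value pair, consistent across the CPUs in @cpus.
 * Unknown keys are reported back with key set to -1 and value set to 0 rather
 * than failing the whole call, keeping the interface forward compatible.
 */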
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}

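/*
 * Handle the default (flags == 0) form of the syscall: for each requested
 * key, fill in a value that holds across every CPU in the supplied set, or
 * across all online CPUs when cpusetsize is 0 and cpus_user is NULL.
 */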
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without
		 * that, there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

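/*
 * Handle the RISCV_HWPROBE_WHICH_CPUS form of the syscall: the caller supplies
 * fully formed key/value pairs, and the CPU set is shrunk to the online CPUs
 * whose answers match every pair. Any invalid key is reported back with
 * key == -1 and causes an empty CPU set to be returned.
 */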
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}

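/*
 * Top-level dispatch: the flags argument selects between filling in values
 * for a given CPU set and filtering the CPU set against given values.
 */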
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

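/*
 * Precompute the "all online CPUs" answers at boot and expose them through
 * the vDSO, so that common hwprobe queries can be answered without entering
 * the kernel.
 */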
static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_time_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}