// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, which allows userspace to probe which features
 * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst
 * for more details.
 */
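/*
 * Example (userspace sketch, not part of this file): a minimal probe of
 * the base behavior key via syscall(2), assuming the uapi definitions
 * from <asm/hwprobe.h> and a kernel providing __NR_riscv_hwprobe.
 *
 *	struct riscv_hwprobe pair = {
 *		.key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR,
 *	};
 *
 *	// cpusetsize == 0 and cpus == NULL mean "all online CPUs";
 *	// flags must be 0 for a plain value query.
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    (pair.value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA))
 *		;	// the IMA baseline is present
 */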
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <vdso/vsyscall.h>

static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}
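
/*
 * Example (illustrative, hypothetical IDs): probing
 * RISCV_HWPROBE_KEY_MVENDORID over a mask holding hart 0 (mvendorid
 * 0x489) and hart 1 (mvendorid 0x5b7) yields pair->value == -1ULL,
 * while a mask holding only hart 0 yields 0x489.
 */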

static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZACAS);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All of the following extensions depend on the kernel's
		 * support for the V extension, so only report them when V
		 * itself is usable.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}
#undef EXT_KEY
	}

	/* Now stop reporting any feature that some CPU in the set lacks. */
	pair->value &= ~missing;
}
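
/*
 * Example (userspace sketch): testing one bit of the
 * RISCV_HWPROBE_KEY_IMA_EXT_0 bitmask returned for a cpumask.
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    (pair.value & RISCV_HWPROBE_EXT_ZBB))
 *		;	// Zbb is usable on every online CPU
 */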

static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif
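
/*
 * Example (userspace sketch): dispatching on the reported scalar
 * misaligned-access performance.  fast_memcpy and aligned_memcpy are
 * hypothetical function names.
 *
 *	struct riscv_hwprobe pair = {
 *		.key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
 *	};
 *
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    pair.value == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST)
 *		memcpy_impl = fast_memcpy;	// misaligned accesses are cheap
 *	else
 *		memcpy_impl = aligned_memcpy;	// avoid misaligned accesses
 */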

#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/*
	 * Report whether misaligned vector accesses are supported at all,
	 * even if their speed was never probed.
	 */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif
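
/*
 * Note: unlike the scalar fallback above, which derives an answer from
 * the kernel configuration, this fallback reports UNKNOWN whenever
 * CONFIG_RISCV_VECTOR_MISALIGNED is disabled, so userspace should treat
 * UNKNOWN as "no information" rather than "unsupported".
 */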

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;

	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
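
/*
 * Example (userspace sketch): thanks to the default case above, probing
 * a key this kernel doesn't know about (0x7fffffff is a hypothetical
 * future key) yields a well-defined answer instead of an error.
 *
 *	struct riscv_hwprobe pair = { .key = 0x7fffffff };
 *
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
 *	    pair.key == -1)
 *		;	// key unrecognized here, pair.value was set to 0
 */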

static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without one
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
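
/*
 * Example (userspace sketch): restricting a value query to CPUs 0 and 1
 * with a caller-supplied cpuset, using the glibc CPU_* macros.
 *
 *	cpu_set_t set;
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_MARCHID };
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(1, &set);
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(set),
 *		    (unsigned long *)&set, 0) == 0)
 *		;	// pair.value is the common marchid, or -1 if they differ
 */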

static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}
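
/*
 * Example (userspace sketch): RISCV_HWPROBE_WHICH_CPUS inverts the
 * query.  The caller seeds each pair with the key/value it requires and
 * the kernel narrows the cpuset to the CPUs satisfying all pairs.
 *
 *	cpu_set_t set;
 *	struct riscv_hwprobe pair = {
 *		.key	= RISCV_HWPROBE_KEY_IMA_EXT_0,
 *		.value	= RISCV_HWPROBE_EXT_ZBB,
 *	};
 *
 *	CPU_ZERO(&set);	// an empty set starts from all online CPUs
 *	if (syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(set),
 *		    (unsigned long *)&set, RISCV_HWPROBE_WHICH_CPUS) == 0)
 *		;	// set now holds exactly the online CPUs with Zbb
 */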

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to detect the
		 * cases where they're all 0 or where any of them is -1.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */
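
/*
 * Note: userspace typically reaches hwprobe through the vDSO entry point
 * (__vdso_riscv_hwprobe), which answers "all CPUs" queries from the data
 * initialized above and falls back to the syscall for masks it cannot
 * serve.
 */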

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}