xref: /linux/arch/riscv/kernel/sys_hwprobe.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
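
/*
 * Illustrative userspace sketch (not part of this file): the interface can be
 * reached directly with syscall(2). Passing cpusetsize == 0 and cpus == NULL
 * queries all online CPUs:
 *
 *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
 *
 *	syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
 *	if (pair.value & RISCV_HWPROBE_EXT_ZBB)
 *		;	// Zbb is usable on every online CPU
 */
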
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <vdso/vsyscall.h>


static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

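		/*
		 * Only the three *ID keys reach this function (dispatched
		 * from hwprobe_one_pair()), so cpu_id is always assigned
		 * below.
		 */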
		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector())
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBS);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZBC);

		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZTSO);
		EXT_KEY(ZACAS);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTPAUSE);

		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
		}

		if (has_fpu()) {
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
			EXT_KEY(ZFA);
		}
#undef EXT_KEY
	}

	/* Now stop reporting any feature that some CPU in the set is missing. */
	pair->value &= ~missing;
}

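/*
 * The net effect of hwprobe_isa_ext0() is an intersection: an extension is
 * reported only if every CPU in the set has it. For example, if hart 0
 * implements Zbb but hart 1 does not, RISCV_HWPROBE_EXT_ZBB ends up in
 * @missing and is cleared from the reported value.
 */

/* Probe the IMA_EXT_0 bitmap once and test @ext against it. */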
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}

#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SLOW;
}
#endif
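
/*
 * Userspace consumption sketch (illustrative; use_unaligned_copy() is a
 * hypothetical helper): code picking a memcpy() variant might do
 *
 *	pair.key = RISCV_HWPROBE_KEY_CPUPERF_0;
 *	syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
 *	if ((pair.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
 *	    RISCV_HWPROBE_MISALIGNED_FAST)
 *		use_unaligned_copy();
 */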

static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0,
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
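
/*
 * Example multi-pair query (userspace, illustrative): pairing Zicboz
 * presence with its block size, resolved in a single call:
 *
 *	struct riscv_hwprobe pairs[] = {
 *		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *		{ .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE },
 *	};
 *
 *	syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);
 */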

static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without one
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}

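/*
 * Example with an explicit CPU mask (userspace, illustrative): restricting
 * the query to CPU 2:
 *
 *	cpu_set_t cpus;
 *
 *	CPU_ZERO(&cpus);
 *	CPU_SET(2, &cpus);
 *	syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(cpus),
 *		(unsigned long *)&cpus, 0);
 */

/*
 * With RISCV_HWPROBE_WHICH_CPUS the pairs are inputs rather than outputs:
 * the caller supplies the key/value pairs it wants, and the CPU mask is
 * written back narrowed to the CPUs whose answers satisfy every pair. An
 * empty input mask is shorthand for all online CPUs; an unrecognized key
 * yields an empty mask.
 */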
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}
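
/*
 * Illustrative WHICH_CPUS use (userspace): which online CPUs implement Zba?
 *
 *	struct riscv_hwprobe pair = {
 *		.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
 *		.value = RISCV_HWPROBE_EXT_ZBA,
 *	};
 *	cpu_set_t cpus;
 *
 *	CPU_ZERO(&cpus);	// empty mask means all online CPUs
 *	syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(cpus),
 *		(unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
 */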

static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or if any of them is -1.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */
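
/*
 * Note: with CONFIG_MMU, userspace normally goes through the vDSO entry
 * point __vdso_riscv_hwprobe(), which can answer "all CPUs" queries from
 * the values cached above (and arbitrary masks when homogeneous_cpus is
 * set), falling back to the syscall below otherwise.
 */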

SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}