xref: /linux/arch/riscv/kernel/cpufeature.c (revision 54d7431af73e2fa53b73cfeb2bec559c6664a4e4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copied from arch/arm64/kernel/cpufeature.c
4  *
5  * Copyright (C) 2015 ARM Ltd.
6  * Copyright (C) 2017 SiFive
7  */
8 
9 #include <linux/acpi.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpuhotplug.h>
12 #include <linux/ctype.h>
13 #include <linux/log2.h>
14 #include <linux/memory.h>
15 #include <linux/module.h>
16 #include <linux/of.h>
17 #include <asm/acpi.h>
18 #include <asm/alternative.h>
19 #include <asm/cacheflush.h>
20 #include <asm/cpufeature.h>
21 #include <asm/hwcap.h>
22 #include <asm/hwprobe.h>
23 #include <asm/patch.h>
24 #include <asm/processor.h>
25 #include <asm/vector.h>
26 
27 #include "copy-unaligned.h"
28 
29 #define NUM_ALPHA_EXTS ('z' - 'a' + 1)
30 
31 #define MISALIGNED_ACCESS_JIFFIES_LG2 1
32 #define MISALIGNED_BUFFER_SIZE 0x4000
33 #define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
34 #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
35 
36 unsigned long elf_hwcap __read_mostly;
37 
38 /* Host ISA bitmap */
39 static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
40 
41 /* Per-cpu ISA extensions. */
42 struct riscv_isainfo hart_isa[NR_CPUS];
43 
44 /* Performance information */
45 DEFINE_PER_CPU(long, misaligned_access_speed);
46 
47 /**
48  * riscv_isa_extension_base() - Get base extension word
49  *
50  * @isa_bitmap: ISA bitmap to use
51  * Return: base extension word as unsigned long value
52  *
53  * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
54  */
55 unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
56 {
57 	if (!isa_bitmap)
58 		return riscv_isa[0];
59 	return isa_bitmap[0];
60 }
61 EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
62 
63 /**
64  * __riscv_isa_extension_available() - Check whether given extension
65  * is available or not
66  *
67  * @isa_bitmap: ISA bitmap to use
68  * @bit: bit position of the desired extension
69  * Return: true or false
70  *
71  * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
72  */
73 bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
74 {
75 	const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
76 
77 	if (bit >= RISCV_ISA_EXT_MAX)
78 		return false;
79 
80 	return test_bit(bit, bmap) ? true : false;
81 }
82 EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
83 
84 static bool riscv_isa_extension_check(int id)
85 {
86 	switch (id) {
87 	case RISCV_ISA_EXT_ZICBOM:
88 		if (!riscv_cbom_block_size) {
89 			pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n");
90 			return false;
91 		} else if (!is_power_of_2(riscv_cbom_block_size)) {
92 			pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
93 			return false;
94 		}
95 		return true;
96 	case RISCV_ISA_EXT_ZICBOZ:
97 		if (!riscv_cboz_block_size) {
98 			pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
99 			return false;
100 		} else if (!is_power_of_2(riscv_cboz_block_size)) {
101 			pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
102 			return false;
103 		}
104 		return true;
105 	case RISCV_ISA_EXT_INVALID:
106 		return false;
107 	}
108 
109 	return true;
110 }
111 
/*
 * Template for a riscv_isa_ext[] entry: @_name is stringized to serve both
 * as the /proc/cpuinfo name and as the "riscv,isa-extensions" DT property
 * value to match against.
 */
#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) {	\
	.name = #_name,								\
	.property = #_name,							\
	.id = _id,								\
	.subset_ext_ids = _subset_exts,						\
	.subset_ext_size = _subset_exts_size					\
}

/* Plain extension: a single ID with no constituent sub-extensions. */
#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0)

/* Used to declare pure "lasso" extension (Zk for instance) */
#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
	_RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts))

/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
	_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts))
129 
/* Constituent extensions enabled by the Zk bundle. */
static const unsigned int riscv_zk_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZBKX,
	RISCV_ISA_EXT_ZKND,
	RISCV_ISA_EXT_ZKNE,
	RISCV_ISA_EXT_ZKR,
	RISCV_ISA_EXT_ZKT,
};

/* Constituent extensions enabled by the Zkn bundle. */
static const unsigned int riscv_zkn_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZBKX,
	RISCV_ISA_EXT_ZKND,
	RISCV_ISA_EXT_ZKNE,
	RISCV_ISA_EXT_ZKNH,
};

/* Constituent extensions enabled by the Zks bundle. */
static const unsigned int riscv_zks_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZKSED,
	RISCV_ISA_EXT_ZKSH
};

/* Common Zvkn members, shared by the Zvkn/Zvknc/Zvkng bundles below. */
#define RISCV_ISA_EXT_ZVKN	\
	RISCV_ISA_EXT_ZVKNED,	\
	RISCV_ISA_EXT_ZVKNHB,	\
	RISCV_ISA_EXT_ZVKB,	\
	RISCV_ISA_EXT_ZVKT

static const unsigned int riscv_zvkn_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN
};

static const unsigned int riscv_zvknc_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN,
	RISCV_ISA_EXT_ZVBC
};

static const unsigned int riscv_zvkng_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN,
	RISCV_ISA_EXT_ZVKG
};

/* Common Zvks members, shared by the Zvks/Zvksc/Zvksg bundles below. */
#define RISCV_ISA_EXT_ZVKS	\
	RISCV_ISA_EXT_ZVKSED,	\
	RISCV_ISA_EXT_ZVKSH,	\
	RISCV_ISA_EXT_ZVKB,	\
	RISCV_ISA_EXT_ZVKT

static const unsigned int riscv_zvks_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS
};

static const unsigned int riscv_zvksc_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS,
	RISCV_ISA_EXT_ZVBC
};

static const unsigned int riscv_zvksg_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS,
	RISCV_ISA_EXT_ZVKG
};

/* Zvbb is a superset of Zvkb (see __RISCV_ISA_EXT_SUPERSET usage below). */
static const unsigned int riscv_zvbb_exts[] = {
	RISCV_ISA_EXT_ZVKB
};
199 
200 /*
201  * The canonical order of ISA extension names in the ISA string is defined in
202  * chapter 27 of the unprivileged specification.
203  *
204  * Ordinarily, for in-kernel data structures, this order is unimportant but
205  * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo.
206  *
207  * The specification uses vague wording, such as should, when it comes to
208  * ordering, so for our purposes the following rules apply:
209  *
210  * 1. All multi-letter extensions must be separated from other extensions by an
211  *    underscore.
212  *
213  * 2. Additional standard extensions (starting with 'Z') must be sorted after
214  *    single-letter extensions and before any higher-privileged extensions.
215  *
216  * 3. The first letter following the 'Z' conventionally indicates the most
217  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
218  *    If multiple 'Z' extensions are named, they must be ordered first by
219  *    category, then alphabetically within a category.
220  *
 * 4. Standard supervisor-level extensions (starting with 'S') must be listed
 *    after standard unprivileged extensions.  If multiple supervisor-level
 *    extensions are listed, they must be ordered alphabetically.
 *
 * 5. Standard machine-level extensions (starting with 'Zxm') must be listed
 *    after any lower-privileged, standard extensions.  If multiple
 *    machine-level extensions are listed, they must be ordered
 *    alphabetically.
 *
 * 6. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. If multiple non-standard extensions are listed, they
 *    must be ordered alphabetically.
233  *
234  * An example string following the order is:
235  *    rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
236  *
237  * New entries to this struct should follow the ordering rules described above.
238  */
const struct riscv_isa_ext_data riscv_isa_ext[] = {
	/* Single-letter extensions, in canonical order. */
	__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
	__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
	__RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
	__RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
	__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
	__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
	__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
	__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
	__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
	/* Multi-letter 'Z' extensions, ordered by category then name. */
	__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
	__RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
	__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
	__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
	__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
	__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
	__RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL),
	__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
	__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
	__RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
	__RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
	__RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
	__RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
	__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
	__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
	__RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC),
	__RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB),
	__RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC),
	__RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX),
	__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
	__RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts),
	__RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND),
	__RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE),
	__RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH),
	__RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR),
	__RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts),
	__RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT),
	__RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
	__RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
	__RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
	__RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
	__RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
	__RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
	__RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
	__RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
	__RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
	__RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
	__RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
	__RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
	__RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
	__RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
	__RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
	/* Supervisor/machine-level extensions, listed last. */
	__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
	__RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
	__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
	__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
	__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
	__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
	__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
	__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
};

const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
309 
310 static void __init match_isa_ext(const struct riscv_isa_ext_data *ext, const char *name,
311 				 const char *name_end, struct riscv_isainfo *isainfo)
312 {
313 	if ((name_end - name == strlen(ext->name)) &&
314 	     !strncasecmp(name, ext->name, name_end - name)) {
315 		/*
316 		 * If this is a bundle, enable all the ISA extensions that
317 		 * comprise the bundle.
318 		 */
319 		if (ext->subset_ext_size) {
320 			for (int i = 0; i < ext->subset_ext_size; i++) {
321 				if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
322 					set_bit(ext->subset_ext_ids[i], isainfo->isa);
323 			}
324 		}
325 
326 		/*
327 		 * This is valid even for bundle extensions which uses the RISCV_ISA_EXT_INVALID id
328 		 * (rejected by riscv_isa_extension_check()).
329 		 */
330 		if (riscv_isa_extension_check(ext->id))
331 			set_bit(ext->id, isainfo->isa);
332 	}
333 }
334 
/*
 * Parse one "riscv,isa"-style string for a hart, setting bits in
 * @isainfo->isa and ORing single-letter hwcaps into @this_hwcap.  The caller
 * guarantees the string starts with "rv" plus "32"/"64", so parsing begins
 * past that 4-character prefix.  Extension version suffixes ("2p1" etc.) are
 * recognised and discarded; malformed tokens are skipped.
 */
static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
					  unsigned long *isa2hwcap, const char *isa)
{
	/*
	 * For all possible cpus, we have already validated in
	 * the boot process that they at least contain "rv" and
	 * whichever of "32"/"64" this kernel supports, and so this
	 * section can be skipped.
	 */
	isa += 4;

	while (*isa) {
		const char *ext = isa++;
		const char *ext_end = isa;
		bool ext_long = false, ext_err = false;

		switch (*ext) {
		case 's':
			/*
			 * Workaround for invalid single-letter 's' & 'u' (QEMU).
			 * No need to set the bit in riscv_isa as 's' & 'u' are
			 * not valid ISA extensions. It works unless the first
			 * multi-letter extension in the ISA string begins with
			 * "Su" and is not prefixed with an underscore.
			 */
			if (ext[-1] != '_' && ext[1] == 'u') {
				++isa;
				ext_err = true;
				break;
			}
			fallthrough;
		case 'S':
		case 'x':
		case 'X':
		case 'z':
		case 'Z':
			/*
			 * Before attempting to parse the extension itself, we find its end.
			 * As multi-letter extensions must be split from other multi-letter
			 * extensions with an "_", the end of a multi-letter extension will
			 * either be the null character or the "_" at the start of the next
			 * multi-letter extension.
			 *
			 * Next, as the extensions version is currently ignored, we
			 * eliminate that portion. This is done by parsing backwards from
			 * the end of the extension, removing any numbers. This may be a
			 * major or minor number however, so the process is repeated if a
			 * minor number was found.
			 *
			 * ext_end is intended to represent the first character *after* the
			 * name portion of an extension, but will be decremented to the last
			 * character itself while eliminating the extensions version number.
			 * A simple re-increment solves this problem.
			 */
			ext_long = true;
			for (; *isa && *isa != '_'; ++isa)
				if (unlikely(!isalnum(*isa)))
					ext_err = true;

			ext_end = isa;
			if (unlikely(ext_err))
				break;

			if (!isdigit(ext_end[-1]))
				break;

			while (isdigit(*--ext_end))
				;

			if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
				++ext_end;
				break;
			}

			while (isdigit(*--ext_end))
				;

			++ext_end;
			break;
		default:
			/*
			 * Things are a little easier for single-letter extensions, as they
			 * are parsed forwards.
			 *
			 * After checking that our starting position is valid, we need to
			 * ensure that, when isa was incremented at the start of the loop,
			 * that it arrived at the start of the next extension.
			 *
			 * If we are already on a non-digit, there is nothing to do. Either
			 * we have a multi-letter extension's _, or the start of an
			 * extension.
			 *
			 * Otherwise we have found the current extension's major version
			 * number. Parse past it, and a subsequent p/minor version number
			 * if present. The `p` extension must not appear immediately after
			 * a number, so there is no fear of missing it.
			 *
			 */
			if (unlikely(!isalpha(*ext))) {
				ext_err = true;
				break;
			}

			if (!isdigit(*isa))
				break;

			while (isdigit(*++isa))
				;

			if (tolower(*isa) != 'p')
				break;

			if (!isdigit(*++isa)) {
				--isa;
				break;
			}

			while (isdigit(*++isa))
				;

			break;
		}

		/*
		 * The parser expects that at the start of an iteration isa points to the
		 * first character of the next extension. As we stop parsing an extension
		 * on meeting a non-alphanumeric character, an extra increment is needed
		 * where the succeeding extension is a multi-letter prefixed with an "_".
		 */
		if (*isa == '_')
			++isa;

		if (unlikely(ext_err))
			continue;
		if (!ext_long) {
			/* Single-letter: the bit index is simply the letter offset. */
			int nr = tolower(*ext) - 'a';

			if (riscv_isa_extension_check(nr)) {
				*this_hwcap |= isa2hwcap[nr];
				set_bit(nr, isainfo->isa);
			}
		} else {
			/* Multi-letter: match the token against the known table. */
			for (int i = 0; i < riscv_isa_ext_count; i++)
				match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo);
		}
	}
}
482 
/*
 * Populate hart_isa, elf_hwcap and riscv_isa from the legacy "riscv,isa"
 * string of every possible CPU: taken from DT when ACPI is disabled, or from
 * the ACPI RHCT table otherwise.  elf_hwcap and riscv_isa end up as the
 * intersection of all harts' capabilities.
 */
static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
{
	struct device_node *node;
	const char *isa;
	int rc;
	struct acpi_table_header *rhct;	/* only initialised on the ACPI path */
	acpi_status status;
	unsigned int cpu;

	if (!acpi_disabled) {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;
	}

	for_each_possible_cpu(cpu) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];
		unsigned long this_hwcap = 0;

		if (acpi_disabled) {
			node = of_cpu_device_node_get(cpu);
			if (!node) {
				pr_warn("Unable to find cpu node\n");
				continue;
			}

			rc = of_property_read_string(node, "riscv,isa", &isa);
			of_node_put(node);
			if (rc) {
				pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
				continue;
			}
		} else {
			rc = acpi_get_riscv_isa(rhct, cpu, &isa);
			if (rc < 0) {
				pr_warn("Unable to get ISA for the hart - %d\n", cpu);
				continue;
			}
		}

		riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);

		/*
		 * These ones were as they were part of the base ISA when the
		 * port & dt-bindings were upstreamed, and so can be set
		 * unconditionally where `i` is in riscv,isa on DT systems.
		 */
		if (acpi_disabled) {
			set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
		}

		/*
		 * All "okay" hart should have same isa. Set HWCAP based on
		 * common capabilities of every "okay" hart, in case they don't
		 * have.
		 */
		if (elf_hwcap)
			elf_hwcap &= this_hwcap;
		else
			elf_hwcap = this_hwcap;

		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
		else
			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
	}

	/* rhct is only valid when the ACPI branch above ran successfully. */
	if (!acpi_disabled && rhct)
		acpi_put_table((struct acpi_table_header *)rhct);
}
556 
557 static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
558 {
559 	unsigned int cpu;
560 
561 	for_each_possible_cpu(cpu) {
562 		unsigned long this_hwcap = 0;
563 		struct device_node *cpu_node;
564 		struct riscv_isainfo *isainfo = &hart_isa[cpu];
565 
566 		cpu_node = of_cpu_device_node_get(cpu);
567 		if (!cpu_node) {
568 			pr_warn("Unable to find cpu node\n");
569 			continue;
570 		}
571 
572 		if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
573 			of_node_put(cpu_node);
574 			continue;
575 		}
576 
577 		for (int i = 0; i < riscv_isa_ext_count; i++) {
578 			const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];
579 
580 			if (of_property_match_string(cpu_node, "riscv,isa-extensions",
581 						     ext->property) < 0)
582 				continue;
583 
584 			if (ext->subset_ext_size) {
585 				for (int j = 0; j < ext->subset_ext_size; j++) {
586 					if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
587 						set_bit(ext->subset_ext_ids[j], isainfo->isa);
588 				}
589 			}
590 
591 			if (riscv_isa_extension_check(ext->id)) {
592 				set_bit(ext->id, isainfo->isa);
593 
594 				/* Only single letter extensions get set in hwcap */
595 				if (strnlen(riscv_isa_ext[i].name, 2) == 1)
596 					this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
597 			}
598 		}
599 
600 		of_node_put(cpu_node);
601 
602 		/*
603 		 * All "okay" harts should have same isa. Set HWCAP based on
604 		 * common capabilities of every "okay" hart, in case they don't.
605 		 */
606 		if (elf_hwcap)
607 			elf_hwcap &= this_hwcap;
608 		else
609 			elf_hwcap = this_hwcap;
610 
611 		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
612 			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
613 		else
614 			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
615 	}
616 
617 	if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
618 		return -ENOENT;
619 
620 	return 0;
621 }
622 
#ifdef CONFIG_RISCV_ISA_FALLBACK
/* Fallback to the deprecated "riscv,isa" string is enabled by default. */
bool __initdata riscv_isa_fallback = true;
#else
/* Fallback is off by default; opt in via the "riscv_isa_fallback" parameter. */
bool __initdata riscv_isa_fallback;
static int __init riscv_isa_fallback_setup(char *__unused)
{
	riscv_isa_fallback = true;
	return 1;
}
early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
#endif
634 
/*
 * Top-level hwcap discovery: prefer the "riscv,isa-extensions" DT list
 * (falling back to the deprecated "riscv,isa" string when allowed, or using
 * it directly on ACPI systems), then apply kernel policy — F without D is
 * unsupported, and V is gated on CONFIG_RISCV_ISA_V — and log the result.
 */
void __init riscv_fill_hwcap(void)
{
	char print_str[NUM_ALPHA_EXTS + 1];
	unsigned long isa2hwcap[26] = {0};
	int i, j;

	/* Map single-letter extension indices to their ELF hwcap bits. */
	isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
	isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
	isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
	isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
	isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
	isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
	isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;

	if (!acpi_disabled) {
		riscv_fill_hwcap_from_isa_string(isa2hwcap);
	} else {
		int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);

		if (ret && riscv_isa_fallback) {
			pr_info("Falling back to deprecated \"riscv,isa\"\n");
			riscv_fill_hwcap_from_isa_string(isa2hwcap);
		}
	}

	/*
	 * We don't support systems with F but without D, so mask those out
	 * here.
	 */
	if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
		pr_info("This kernel does not support systems with F but not D\n");
		elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
	}

	if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
		riscv_v_setup_vsize();
		/*
		 * ISA string in device tree might have 'v' flag, but
		 * CONFIG_RISCV_ISA_V is disabled in kernel.
		 * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
		 */
		if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
			elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
	}

	/* Render the base (single-letter) extension set as a string. */
	memset(print_str, 0, sizeof(print_str));
	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
		if (riscv_isa[0] & BIT_MASK(i))
			print_str[j++] = (char)('a' + i);
	pr_info("riscv: base ISA extensions %s\n", print_str);

	/* And the subset actually exposed to userspace via hwcap. */
	memset(print_str, 0, sizeof(print_str));
	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
		if (elf_hwcap & BIT_MASK(i))
			print_str[j++] = (char)('a' + i);
	pr_info("riscv: ELF capabilities %s\n", print_str);
}
692 
693 unsigned long riscv_get_elf_hwcap(void)
694 {
695 	unsigned long hwcap;
696 
697 	hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));
698 
699 	if (!riscv_v_vstate_ctrl_user_allowed())
700 		hwcap &= ~COMPAT_HWCAP_ISA_V;
701 
702 	return hwcap;
703 }
704 
/*
 * Benchmark misaligned word copies against byte copies on the current CPU to
 * classify misaligned accesses as "fast" or "slow", recording the verdict in
 * per-cpu misaligned_access_speed.  Measurement runs with preemption
 * disabled across two jiffy-aligned windows, keeping the best (minimum)
 * cycle count per method.  @param is the scratch page allocated by the
 * caller.  Always returns 0 (shape required by smp_call_on_cpu()).
 */
static int check_unaligned_access(void *param)
{
	int cpu = smp_processor_id();
	u64 start_cycles, end_cycles;
	u64 word_cycles;
	u64 byte_cycles;
	int ratio;
	unsigned long start_jiffies, now;
	struct page *page = param;
	void *dst;
	void *src;
	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;

	/* Nothing to measure if misaligned accesses are emulated (trapped). */
	if (check_unaligned_access_emulated(cpu))
		return 0;

	/* Make an unaligned destination buffer. */
	dst = (void *)((unsigned long)page_address(page) | 0x1);
	/* Unalign src as well, but differently (off by 1 + 2 = 3). */
	src = dst + (MISALIGNED_BUFFER_SIZE / 2);
	src += 2;
	word_cycles = -1ULL;
	/* Do a warmup. */
	__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
	preempt_disable();
	/* Align the measurement window to a jiffy boundary. */
	start_jiffies = jiffies;
	while ((now = jiffies) == start_jiffies)
		cpu_relax();

	/*
	 * For a fixed amount of time, repeatedly try the function, and take
	 * the best time in cycles as the measurement.
	 */
	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
		start_cycles = get_cycles64();
		/* Ensure the CSR read can't reorder WRT to the copy. */
		mb();
		__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
		/* Ensure the copy ends before the end time is snapped. */
		mb();
		end_cycles = get_cycles64();
		if ((end_cycles - start_cycles) < word_cycles)
			word_cycles = end_cycles - start_cycles;
	}

	/* Repeat the same protocol for the byte-wise copy. */
	byte_cycles = -1ULL;
	__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
	start_jiffies = jiffies;
	while ((now = jiffies) == start_jiffies)
		cpu_relax();

	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
		start_cycles = get_cycles64();
		mb();
		__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
		mb();
		end_cycles = get_cycles64();
		if ((end_cycles - start_cycles) < byte_cycles)
			byte_cycles = end_cycles - start_cycles;
	}

	preempt_enable();

	/* Don't divide by zero. */
	if (!word_cycles || !byte_cycles) {
		pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
			cpu);

		return 0;
	}

	if (word_cycles < byte_cycles)
		speed = RISCV_HWPROBE_MISALIGNED_FAST;

	ratio = div_u64((byte_cycles * 100), word_cycles);
	pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
		cpu,
		ratio / 100,
		ratio % 100,
		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");

	per_cpu(misaligned_access_speed, cpu) = speed;
	return 0;
}
789 
/*
 * on_each_cpu() callback: run the unaligned-access benchmark on every CPU
 * except CPU 0, which is measured separately (it stays behind to tend
 * jiffies).  @param is the per-CPU array of scratch pages.
 */
static void check_unaligned_access_nonboot_cpu(void *param)
{
	unsigned int cpu = smp_processor_id();
	struct page **pages = param;

	/* Reuse the cached id rather than re-reading smp_processor_id(). */
	if (cpu != 0)
		check_unaligned_access(pages[cpu]);
}
798 
799 static int riscv_online_cpu(unsigned int cpu)
800 {
801 	static struct page *buf;
802 
803 	/* We are already set since the last check */
804 	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
805 		return 0;
806 
807 	buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
808 	if (!buf) {
809 		pr_warn("Allocation failure, not measuring misaligned performance\n");
810 		return -ENOMEM;
811 	}
812 
813 	check_unaligned_access(buf);
814 	__free_pages(buf, MISALIGNED_BUFFER_ORDER);
815 	return 0;
816 }
817 
818 /* Measure unaligned access on all CPUs present at boot in parallel. */
819 static int check_unaligned_access_all_cpus(void)
820 {
821 	unsigned int cpu;
822 	unsigned int cpu_count = num_possible_cpus();
823 	struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
824 				     GFP_KERNEL);
825 
826 	if (!bufs) {
827 		pr_warn("Allocation failure, not measuring misaligned performance\n");
828 		return 0;
829 	}
830 
831 	/*
832 	 * Allocate separate buffers for each CPU so there's no fighting over
833 	 * cache lines.
834 	 */
835 	for_each_cpu(cpu, cpu_online_mask) {
836 		bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
837 		if (!bufs[cpu]) {
838 			pr_warn("Allocation failure, not measuring misaligned performance\n");
839 			goto out;
840 		}
841 	}
842 
843 	/* Check everybody except 0, who stays behind to tend jiffies. */
844 	on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
845 
846 	/* Check core 0. */
847 	smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
848 
849 	/* Setup hotplug callback for any new CPUs that come online. */
850 	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
851 				  riscv_online_cpu, NULL);
852 
853 out:
854 	unaligned_emulation_finish();
855 	for_each_cpu(cpu, cpu_online_mask) {
856 		if (bufs[cpu])
857 			__free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
858 	}
859 
860 	kfree(bufs);
861 	return 0;
862 }
863 
864 arch_initcall(check_unaligned_access_all_cpus);
865 
866 void riscv_user_isa_enable(void)
867 {
868 	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
869 		csr_set(CSR_SENVCFG, ENVCFG_CBZE);
870 }
871 
872 #ifdef CONFIG_RISCV_ALTERNATIVE
873 /*
874  * Alternative patch sites consider 48 bits when determining when to patch
875  * the old instruction sequence with the new. These bits are broken into a
876  * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
877  * patch site is for an erratum, identified by the 32-bit patch ID. When
878  * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
879  * further break down patch ID into two 16-bit numbers. The lower 16 bits
880  * are the cpufeature ID and the upper 16 bits are used for a value specific
881  * to the cpufeature and patch site. If the upper 16 bits are zero, then it
882  * implies no specific value is specified. cpufeatures that want to control
883  * patching on a per-site basis will provide non-zero values and implement
884  * checks here. The checks return true when patching should be done, and
885  * false otherwise.
886  */
887 static bool riscv_cpufeature_patch_check(u16 id, u16 value)
888 {
889 	if (!value)
890 		return true;
891 
892 	switch (id) {
893 	case RISCV_ISA_EXT_ZICBOZ:
894 		/*
895 		 * Zicboz alternative applications provide the maximum
896 		 * supported block size order, or zero when it doesn't
897 		 * matter. If the current block size exceeds the maximum,
898 		 * then the alternative cannot be applied.
899 		 */
900 		return riscv_cboz_block_size <= (1U << value);
901 	}
902 
903 	return false;
904 }
905 
/*
 * Apply cpufeature-based alternative patches in [begin, end): for each site
 * whose vendor ID is zero (cpufeature, not erratum), whose extension is
 * present on the host, and whose site-specific value check passes, replace
 * the old instruction sequence and fix up PC-relative offsets, serialized
 * under text_mutex.
 */
void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
						  struct alt_entry *end,
						  unsigned int stage)
{
	struct alt_entry *alt;
	void *oldptr, *altptr;
	u16 id, value;

	/* Nothing is patched during the early-boot stage here. */
	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return;

	for (alt = begin; alt < end; alt++) {
		/* Non-zero vendor IDs denote errata sites; skip them here. */
		if (alt->vendor_id != 0)
			continue;

		id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);

		if (id >= RISCV_ISA_EXT_MAX) {
			WARN(1, "This extension id:%d is not in ISA extension list", id);
			continue;
		}

		if (!__riscv_isa_extension_available(NULL, id))
			continue;

		value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
		if (!riscv_cpufeature_patch_check(id, value))
			continue;

		oldptr = ALT_OLD_PTR(alt);
		altptr = ALT_ALT_PTR(alt);

		mutex_lock(&text_mutex);
		patch_text_nosync(oldptr, altptr, alt->alt_len);
		riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr);
		mutex_unlock(&text_mutex);
	}
}
944 #endif
945