// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copied from arch/arm64/kernel/cpufeature.c
 *
 * Copyright (C) 2015 ARM Ltd.
 * Copyright (C) 2017 SiFive
 */

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
#include <linux/jump_label.h>
#include <linux/log2.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
#include <asm/hwprobe.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/vector.h>

#include "copy-unaligned.h"

#define NUM_ALPHA_EXTS ('z' - 'a' + 1)

#define MISALIGNED_ACCESS_JIFFIES_LG2 1
#define MISALIGNED_BUFFER_SIZE 0x4000
#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
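/*
 * The 16 KiB buffer is split in half between src and dst, and the copy size
 * is trimmed by 0x80 bytes so the deliberately misaligned src/dst pointers
 * used in check_unaligned_access() cannot run past the end of their half.
 */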

unsigned long elf_hwcap __read_mostly;

/* Host ISA bitmap */
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;

/* Per-cpu ISA extensions. */
struct riscv_isainfo hart_isa[NR_CPUS];

/* Performance information */
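/* Consumed by hwprobe, which reports it to userspace as the RISCV_HWPROBE_MISALIGNED_* values. */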
DEFINE_PER_CPU(long, misaligned_access_speed);

static cpumask_t fast_misaligned_access;

/**
 * riscv_isa_extension_base() - Get base extension word
 *
 * @isa_bitmap: ISA bitmap to use
 * Return: base extension word as unsigned long value
 *
 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
 */
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
{
	if (!isa_bitmap)
		return riscv_isa[0];
	return isa_bitmap[0];
}
EXPORT_SYMBOL_GPL(riscv_isa_extension_base);

/**
 * __riscv_isa_extension_available() - Check whether a given extension
 * is available or not
 *
 * @isa_bitmap: ISA bitmap to use
 * @bit: bit position of the desired extension
 * Return: true or false
 *
 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
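 *
 * Most callers use the riscv_isa_extension_available() wrapper macro, e.g.
 * riscv_isa_extension_available(NULL, ZICBOZ), which pastes on the
 * RISCV_ISA_EXT_ prefix before calling this function.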
 */
bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
{
	const unsigned long *bmap = isa_bitmap ? isa_bitmap : riscv_isa;

	if (bit >= RISCV_ISA_EXT_MAX)
		return false;

	return test_bit(bit, bmap);
}
EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);

static bool riscv_isa_extension_check(int id)
{
	switch (id) {
	case RISCV_ISA_EXT_ZICBOM:
		if (!riscv_cbom_block_size) {
			pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n");
			return false;
		} else if (!is_power_of_2(riscv_cbom_block_size)) {
			pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
			return false;
		}
		return true;
	case RISCV_ISA_EXT_ZICBOZ:
		if (!riscv_cboz_block_size) {
			pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
			return false;
		} else if (!is_power_of_2(riscv_cboz_block_size)) {
			pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
			return false;
		}
		return true;
	case RISCV_ISA_EXT_INVALID:
		return false;
	}

	return true;
}

#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) {	\
	.name = #_name,								\
	.property = #_name,							\
	.id = _id,								\
	.subset_ext_ids = _subset_exts,						\
	.subset_ext_size = _subset_exts_size					\
}

#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0)

/* Used to declare a pure "lasso" extension, i.e. a bundle of other extensions (Zk for instance) */
#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
	_RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts))

/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
	_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts))
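
/*
 * For example, __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts) creates an
 * entry whose own id is RISCV_ISA_EXT_INVALID, so "zk" itself never gets a
 * bit in the ISA bitmap; only its bundled sub-extensions do. A superset
 * entry, by contrast, sets both its own bit and its subset bits.
 */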

static const unsigned int riscv_zk_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZBKX,
	RISCV_ISA_EXT_ZKND,
	RISCV_ISA_EXT_ZKNE,
	RISCV_ISA_EXT_ZKR,
	RISCV_ISA_EXT_ZKT,
};

static const unsigned int riscv_zkn_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZBKX,
	RISCV_ISA_EXT_ZKND,
	RISCV_ISA_EXT_ZKNE,
	RISCV_ISA_EXT_ZKNH,
};

static const unsigned int riscv_zks_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZKSED,
	RISCV_ISA_EXT_ZKSH
};

#define RISCV_ISA_EXT_ZVKN	\
	RISCV_ISA_EXT_ZVKNED,	\
	RISCV_ISA_EXT_ZVKNHB,	\
	RISCV_ISA_EXT_ZVKB,	\
	RISCV_ISA_EXT_ZVKT

static const unsigned int riscv_zvkn_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN
};

static const unsigned int riscv_zvknc_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN,
	RISCV_ISA_EXT_ZVBC
};

static const unsigned int riscv_zvkng_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN,
	RISCV_ISA_EXT_ZVKG
};

#define RISCV_ISA_EXT_ZVKS	\
	RISCV_ISA_EXT_ZVKSED,	\
	RISCV_ISA_EXT_ZVKSH,	\
	RISCV_ISA_EXT_ZVKB,	\
	RISCV_ISA_EXT_ZVKT

static const unsigned int riscv_zvks_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS
};

static const unsigned int riscv_zvksc_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS,
	RISCV_ISA_EXT_ZVBC
};

static const unsigned int riscv_zvksg_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS,
	RISCV_ISA_EXT_ZVKG
};

static const unsigned int riscv_zvbb_exts[] = {
	RISCV_ISA_EXT_ZVKB
};

/*
 * The canonical order of ISA extension names in the ISA string is defined in
 * chapter 27 of the unprivileged specification.
 *
 * Ordinarily, for in-kernel data structures, this order is unimportant but
 * riscv_isa_ext defines the order of the ISA string in /proc/cpuinfo.
 *
 * The specification uses vague wording, such as "should", when it comes to
 * ordering, so for our purposes the following rules apply:
 *
 * 1. All multi-letter extensions must be separated from other extensions by an
 *    underscore.
 *
 * 2. Additional standard extensions (starting with 'Z') must be sorted after
 *    single-letter extensions and before any higher-privileged extensions.
 *
 * 3. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they must be ordered first by
 *    category, then alphabetically within a category.
 *
 * 4. Standard supervisor-level extensions (starting with 'S') must be listed
 *    after standard unprivileged extensions.  If multiple supervisor-level
 *    extensions are listed, they must be ordered alphabetically.
 *
 * 5. Standard machine-level extensions (starting with 'Zxm') must be listed
 *    after any lower-privileged, standard extensions.  If multiple
 *    machine-level extensions are listed, they must be ordered
 *    alphabetically.
 *
 * 6. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. If multiple non-standard extensions are listed, they
 *    must be ordered alphabetically.
 *
 * An example string following the order is:
 *    rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
 *
 * New entries to this struct should follow the ordering rules described above.
 */
const struct riscv_isa_ext_data riscv_isa_ext[] = {
	__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
	__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
	__RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
	__RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
	__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
	__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
	__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
	__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
	__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
	__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
	__RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
	__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
	__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
	__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
	__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
	__RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL),
	__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
	__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
	__RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
	__RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
	__RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
	__RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
	__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
	__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
	__RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC),
	__RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB),
	__RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC),
	__RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX),
	__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
	__RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts),
	__RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND),
	__RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE),
	__RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH),
	__RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR),
	__RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts),
	__RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT),
	__RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
	__RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
	__RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
	__RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
	__RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
	__RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
	__RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
	__RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
	__RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
	__RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
	__RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
	__RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
	__RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
	__RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
	__RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
	__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
	__RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
	__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
	__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
	__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
	__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
	__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
	__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
};

const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
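
/*
 * Note: /proc/cpuinfo prints the ISA string by walking riscv_isa_ext[] in
 * array order, which is why the entries above follow the canonical ordering
 * rules described in the comment preceding the table.
 */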

static void __init match_isa_ext(const struct riscv_isa_ext_data *ext, const char *name,
				 const char *name_end, struct riscv_isainfo *isainfo)
{
	if ((name_end - name == strlen(ext->name)) &&
	     !strncasecmp(name, ext->name, name_end - name)) {
		/*
		 * If this is a bundle, enable all the ISA extensions that
		 * comprise the bundle.
		 */
		if (ext->subset_ext_size) {
			for (int i = 0; i < ext->subset_ext_size; i++) {
				if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
					set_bit(ext->subset_ext_ids[i], isainfo->isa);
			}
		}

		/*
		 * This is valid even for bundle extensions which use the
		 * RISCV_ISA_EXT_INVALID id (rejected by riscv_isa_extension_check()).
		 */
		if (riscv_isa_extension_check(ext->id))
			set_bit(ext->id, isainfo->isa);
	}
}

static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
					  unsigned long *isa2hwcap, const char *isa)
{
	/*
	 * For all possible cpus, we have already validated in
	 * the boot process that they at least contain "rv" and
	 * whichever of "32"/"64" this kernel supports, and so this
	 * section can be skipped.
	 */
	isa += 4;

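	/*
	 * Worked example: for isa = "rv64imac_zicsr_xfoo" parsing begins at
	 * "imac_zicsr_xfoo". Each iteration consumes one extension: 'i', 'm',
	 * 'a' and 'c' take the single-letter path below, "zicsr" and "xfoo"
	 * take the multi-letter path, and the '_' separators are skipped at
	 * the bottom of the loop.
	 */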
	while (*isa) {
		const char *ext = isa++;
		const char *ext_end = isa;
		bool ext_long = false, ext_err = false;

		switch (*ext) {
		case 's':
			/*
			 * Workaround for invalid single-letter 's' & 'u' (QEMU).
			 * No need to set the bit in riscv_isa as 's' & 'u' are
			 * not valid ISA extensions. It works unless the first
			 * multi-letter extension in the ISA string begins with
			 * "Su" and is not prefixed with an underscore.
			 */
			if (ext[-1] != '_' && ext[1] == 'u') {
				++isa;
				ext_err = true;
				break;
			}
			fallthrough;
		case 'S':
		case 'x':
		case 'X':
		case 'z':
		case 'Z':
			/*
			 * Before attempting to parse the extension itself, we find its end.
			 * As multi-letter extensions must be split from other multi-letter
			 * extensions with an "_", the end of a multi-letter extension will
			 * either be the null character or the "_" at the start of the next
			 * multi-letter extension.
			 *
			 * Next, as the extension's version is currently ignored, we
			 * eliminate that portion. This is done by parsing backwards from
			 * the end of the extension, removing any numbers. This may be a
			 * major or minor number however, so the process is repeated if a
			 * minor number was found.
			 *
			 * ext_end is intended to represent the first character *after* the
			 * name portion of an extension, but will be decremented to the last
			 * character itself while eliminating the extension's version number.
			 * A simple re-increment solves this problem.
			 */
			ext_long = true;
			for (; *isa && *isa != '_'; ++isa)
				if (unlikely(!isalnum(*isa)))
					ext_err = true;

			ext_end = isa;
			if (unlikely(ext_err))
				break;

			if (!isdigit(ext_end[-1]))
				break;

			while (isdigit(*--ext_end))
				;

			if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
				++ext_end;
				break;
			}

			while (isdigit(*--ext_end))
				;

			++ext_end;
			break;
		default:
			/*
			 * Things are a little easier for single-letter extensions, as they
			 * are parsed forwards.
			 *
			 * After checking that our starting position is valid, we need to
			 * ensure that, when isa was incremented at the start of the loop,
			 * it arrived at the start of the next extension.
			 *
			 * If we are already on a non-digit, there is nothing to do. Either
			 * we have a multi-letter extension's _, or the start of an
			 * extension.
			 *
			 * Otherwise we have found the current extension's major version
			 * number. Parse past it, and a subsequent p/minor version number
			 * if present. The `p` extension must not appear immediately after
			 * a number, so there is no fear of missing it.
			 */
			if (unlikely(!isalpha(*ext))) {
				ext_err = true;
				break;
			}

			if (!isdigit(*isa))
				break;

			while (isdigit(*++isa))
				;

			if (tolower(*isa) != 'p')
				break;

			if (!isdigit(*++isa)) {
				--isa;
				break;
			}

			while (isdigit(*++isa))
				;

			break;
		}

		/*
		 * The parser expects that at the start of an iteration isa points to the
		 * first character of the next extension. As we stop parsing an extension
		 * on meeting a non-alphanumeric character, an extra increment is needed
		 * where the succeeding extension is a multi-letter prefixed with an "_".
		 */
		if (*isa == '_')
			++isa;

		if (unlikely(ext_err))
			continue;
		if (!ext_long) {
			int nr = tolower(*ext) - 'a';

			if (riscv_isa_extension_check(nr)) {
				*this_hwcap |= isa2hwcap[nr];
				set_bit(nr, isainfo->isa);
			}
		} else {
			for (int i = 0; i < riscv_isa_ext_count; i++)
				match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo);
		}
	}
}

static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
{
	struct device_node *node;
	const char *isa;
	int rc;
	struct acpi_table_header *rhct;
	acpi_status status;
	unsigned int cpu;

	if (!acpi_disabled) {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;
	}

	for_each_possible_cpu(cpu) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];
		unsigned long this_hwcap = 0;

		if (acpi_disabled) {
			node = of_cpu_device_node_get(cpu);
			if (!node) {
				pr_warn("Unable to find cpu node\n");
				continue;
			}

			rc = of_property_read_string(node, "riscv,isa", &isa);
			of_node_put(node);
			if (rc) {
				pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
				continue;
			}
		} else {
			rc = acpi_get_riscv_isa(rhct, cpu, &isa);
			if (rc < 0) {
				pr_warn("Unable to get ISA for the hart - %d\n", cpu);
				continue;
			}
		}

		riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);

		/*
		 * These extensions were part of the base ISA when the port &
		 * dt-bindings were upstreamed, and so can be set
		 * unconditionally wherever `i` is present in riscv,isa on DT
		 * systems.
		 */
		if (acpi_disabled) {
			set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
		}

		/*
		 * All "okay" harts should have the same ISA. Set HWCAP based
		 * on the common capabilities of every "okay" hart, in case
		 * they don't.
		 */
		if (elf_hwcap)
			elf_hwcap &= this_hwcap;
		else
			elf_hwcap = this_hwcap;

		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
		else
			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
	}

	if (!acpi_disabled && rhct)
		acpi_put_table(rhct);
}

static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned long this_hwcap = 0;
		struct device_node *cpu_node;
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

		cpu_node = of_cpu_device_node_get(cpu);
		if (!cpu_node) {
			pr_warn("Unable to find cpu node\n");
			continue;
		}

		if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
			of_node_put(cpu_node);
			continue;
		}

		for (int i = 0; i < riscv_isa_ext_count; i++) {
			const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];

			if (of_property_match_string(cpu_node, "riscv,isa-extensions",
						     ext->property) < 0)
				continue;

			if (ext->subset_ext_size) {
				for (int j = 0; j < ext->subset_ext_size; j++) {
					if (riscv_isa_extension_check(ext->subset_ext_ids[j]))
						set_bit(ext->subset_ext_ids[j], isainfo->isa);
				}
			}

			if (riscv_isa_extension_check(ext->id)) {
				set_bit(ext->id, isainfo->isa);

				/* Only single-letter extensions get set in hwcap */
				if (strnlen(riscv_isa_ext[i].name, 2) == 1)
					this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
			}
		}

		of_node_put(cpu_node);

		/*
		 * All "okay" harts should have the same ISA. Set HWCAP based
		 * on the common capabilities of every "okay" hart, in case
		 * they don't.
		 */
		if (elf_hwcap)
			elf_hwcap &= this_hwcap;
		else
			elf_hwcap = this_hwcap;

		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
		else
			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
	}

	if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
		return -ENOENT;

	return 0;
}

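/*
 * With CONFIG_RISCV_ISA_FALLBACK=n, the fallback to the deprecated
 * "riscv,isa" devicetree property can still be re-enabled at runtime by
 * passing riscv_isa_fallback on the kernel command line.
 */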
#ifdef CONFIG_RISCV_ISA_FALLBACK
bool __initdata riscv_isa_fallback = true;
#else
bool __initdata riscv_isa_fallback;
static int __init riscv_isa_fallback_setup(char *__unused)
{
	riscv_isa_fallback = true;
	return 1;
}
early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
#endif

void __init riscv_fill_hwcap(void)
{
	char print_str[NUM_ALPHA_EXTS + 1];
	unsigned long isa2hwcap[26] = {0};
	int i, j;

	isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
	isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
	isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
	isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
	isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
	isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
	isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;

	if (!acpi_disabled) {
		riscv_fill_hwcap_from_isa_string(isa2hwcap);
	} else {
		int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);

		if (ret && riscv_isa_fallback) {
			pr_info("Falling back to deprecated \"riscv,isa\"\n");
			riscv_fill_hwcap_from_isa_string(isa2hwcap);
		}
	}

	/*
	 * We don't support systems with F but without D, so mask those out
	 * here.
	 */
	if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
		pr_info("This kernel does not support systems with F but not D\n");
		elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
	}

	if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
		riscv_v_setup_vsize();
		/*
		 * The ISA string in the device tree might have the 'v' flag
		 * even when CONFIG_RISCV_ISA_V is disabled in the kernel; in
		 * that case, clear the V flag in elf_hwcap.
		 */
		if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
			elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
	}

	memset(print_str, 0, sizeof(print_str));
	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
		if (riscv_isa[0] & BIT_MASK(i))
			print_str[j++] = (char)('a' + i);
	pr_info("riscv: base ISA extensions %s\n", print_str);

	memset(print_str, 0, sizeof(print_str));
	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
		if (elf_hwcap & BIT_MASK(i))
			print_str[j++] = (char)('a' + i);
	pr_info("riscv: ELF capabilities %s\n", print_str);
}

unsigned long riscv_get_elf_hwcap(void)
{
	unsigned long hwcap;

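	/* Expose only the single-letter base extensions in ELF hwcap. */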
	hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));

	if (!riscv_v_vstate_ctrl_user_allowed())
		hwcap &= ~COMPAT_HWCAP_ISA_V;

	return hwcap;
}

static int check_unaligned_access(void *param)
{
	int cpu = smp_processor_id();
	u64 start_cycles, end_cycles;
	u64 word_cycles;
	u64 byte_cycles;
	int ratio;
	unsigned long start_jiffies, now;
	struct page *page = param;
	void *dst;
	void *src;
	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;

	if (check_unaligned_access_emulated(cpu))
		return 0;

	/* Make an unaligned destination buffer. */
	dst = (void *)((unsigned long)page_address(page) | 0x1);
	/* Unalign src as well, but differently (off by 1 + 2 = 3). */
	src = dst + (MISALIGNED_BUFFER_SIZE / 2);
	src += 2;
	word_cycles = -1ULL;
	/* Do a warmup. */
	__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
	preempt_disable();
	start_jiffies = jiffies;
	while ((now = jiffies) == start_jiffies)
		cpu_relax();

	/*
	 * For a fixed amount of time, repeatedly try the function, and take
	 * the best time in cycles as the measurement.
	 */
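	/*
	 * With MISALIGNED_ACCESS_JIFFIES_LG2 = 1 each measurement window is
	 * two jiffies, e.g. 8 ms at HZ=250 or 2 ms at HZ=1000.
	 */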
	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
		start_cycles = get_cycles64();
		/* Ensure the CSR read can't be reordered with respect to the copy. */
		mb();
		__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
		/* Ensure the copy ends before the end time is snapped. */
		mb();
		end_cycles = get_cycles64();
		if ((end_cycles - start_cycles) < word_cycles)
			word_cycles = end_cycles - start_cycles;
	}

	byte_cycles = -1ULL;
	__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
	start_jiffies = jiffies;
	while ((now = jiffies) == start_jiffies)
		cpu_relax();

	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
		start_cycles = get_cycles64();
		mb();
		__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
		mb();
		end_cycles = get_cycles64();
		if ((end_cycles - start_cycles) < byte_cycles)
			byte_cycles = end_cycles - start_cycles;
	}

	preempt_enable();

	/* Don't divide by zero. */
	if (!word_cycles || !byte_cycles) {
		pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
			cpu);

		return 0;
	}

	if (word_cycles < byte_cycles)
		speed = RISCV_HWPROBE_MISALIGNED_FAST;

	ratio = div_u64((byte_cycles * 100), word_cycles);
	pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
		cpu,
		ratio / 100,
		ratio % 100,
		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");

	per_cpu(misaligned_access_speed, cpu) = speed;

	/*
	 * Record this CPU in the fast_misaligned_access cpumask. The cpumask
	 * operations are atomic, avoiding race conditions.
	 */
	if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
		cpumask_set_cpu(cpu, &fast_misaligned_access);
	else
		cpumask_clear_cpu(cpu, &fast_misaligned_access);

	return 0;
}

static void check_unaligned_access_nonboot_cpu(void *param)
{
	unsigned int cpu = smp_processor_id();
	struct page **pages = param;

	if (cpu != 0)
		check_unaligned_access(pages[cpu]);
}

DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);

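/*
 * Enable the fast-misaligned-access static key only when the number of fast
 * CPUs in @mask matches @weight, i.e. every CPU we expect to be fast is;
 * otherwise disable it.
 */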
static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
{
	if (cpumask_weight(mask) == weight)
		static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key);
	else
		static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key);
}

static void set_unaligned_access_static_branches_except_cpu(int cpu)
{
	/*
	 * Same as set_unaligned_access_static_branches, except excludes the
	 * given CPU from the result. When a CPU is hotplugged into an offline
	 * state, this function is called before the CPU is set to offline in
	 * the cpumask, and thus the CPU needs to be explicitly excluded.
	 */

	cpumask_t fast_except_me;

	cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask);
	cpumask_clear_cpu(cpu, &fast_except_me);

	modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1);
}

static void set_unaligned_access_static_branches(void)
{
	/*
	 * This will be called after check_unaligned_access_all_cpus so the
	 * result of unaligned access speed for all CPUs will be available.
	 *
	 * To avoid the number of online cpus changing between reading
	 * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be
	 * held before calling this function.
	 */

	cpumask_t fast_and_online;

	cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask);

	modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
}

static int lock_and_set_unaligned_access_static_branch(void)
{
	cpus_read_lock();
	set_unaligned_access_static_branches();
	cpus_read_unlock();

	return 0;
}

arch_initcall_sync(lock_and_set_unaligned_access_static_branch);

static int riscv_online_cpu(unsigned int cpu)
{
	static struct page *buf;

	/* We are already set since the last check */
	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
		goto exit;

	buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
	if (!buf) {
		pr_warn("Allocation failure, not measuring misaligned performance\n");
		return -ENOMEM;
	}

	check_unaligned_access(buf);
	__free_pages(buf, MISALIGNED_BUFFER_ORDER);

exit:
	set_unaligned_access_static_branches();

	return 0;
}

static int riscv_offline_cpu(unsigned int cpu)
{
	set_unaligned_access_static_branches_except_cpu(cpu);

	return 0;
}

/* Measure unaligned access speed on all CPUs present at boot in parallel. */
static int check_unaligned_access_all_cpus(void)
{
	unsigned int cpu;
	unsigned int cpu_count = num_possible_cpus();
	struct page **bufs = kcalloc(cpu_count, sizeof(*bufs), GFP_KERNEL);

	if (!bufs) {
		pr_warn("Allocation failure, not measuring misaligned performance\n");
		return 0;
	}

	/*
	 * Allocate separate buffers for each CPU so there's no fighting over
	 * cache lines.
	 */
	for_each_cpu(cpu, cpu_online_mask) {
		bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
		if (!bufs[cpu]) {
			pr_warn("Allocation failure, not measuring misaligned performance\n");
			goto out;
		}
	}

	/* Check everybody except 0, who stays behind to tend jiffies. */
	on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);

	/* Check core 0. */
	smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);

	/*
	 * Set up hotplug callbacks for any new CPUs that come online or go
	 * offline.
	 */
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
				  riscv_online_cpu, riscv_offline_cpu);

out:
	unaligned_emulation_finish();
	for_each_cpu(cpu, cpu_online_mask) {
		if (bufs[cpu])
			__free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
	}

	kfree(bufs);
	return 0;
}

arch_initcall(check_unaligned_access_all_cpus);

void riscv_user_isa_enable(void)
{
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
		csr_set(CSR_SENVCFG, ENVCFG_CBZE);
}

#ifdef CONFIG_RISCV_ALTERNATIVE
/*
 * Alternative patch sites consider 48 bits when determining whether to patch
 * the old instruction sequence with the new. These bits are broken into a
 * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
 * patch site is for an erratum, identified by the 32-bit patch ID. When
 * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
 * further break down patch ID into two 16-bit numbers. The lower 16 bits
 * are the cpufeature ID and the upper 16 bits are used for a value specific
 * to the cpufeature and patch site. If the upper 16 bits are zero, then it
 * implies no specific value is specified. cpufeatures that want to control
 * patching on a per-site basis will provide non-zero values and implement
 * checks here. The checks return true when patching should be done, and
 * false otherwise.
 */
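/*
 * Illustrative example: a Zicboz patch site that tolerates block sizes up
 * to 2^6 bytes would encode a patch_id with RISCV_ISA_EXT_ZICBOZ in the low
 * 16 bits and 6 in the upper 16 bits; the check below then verifies that
 * riscv_cboz_block_size <= (1U << 6) before patching.
 */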
static bool riscv_cpufeature_patch_check(u16 id, u16 value)
{
	if (!value)
		return true;

	switch (id) {
	case RISCV_ISA_EXT_ZICBOZ:
		/*
		 * Zicboz alternative applications provide the maximum
		 * supported block size order, or zero when it doesn't
		 * matter. If the current block size exceeds the maximum,
		 * then the alternative cannot be applied.
		 */
		return riscv_cboz_block_size <= (1U << value);
	}

	return false;
}

void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
						  struct alt_entry *end,
						  unsigned int stage)
{
	struct alt_entry *alt;
	void *oldptr, *altptr;
	u16 id, value;

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return;

	for (alt = begin; alt < end; alt++) {
		if (alt->vendor_id != 0)
			continue;

		id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);

		if (id >= RISCV_ISA_EXT_MAX) {
			WARN(1, "This extension id:%d is not in ISA extension list", id);
			continue;
		}

		if (!__riscv_isa_extension_available(NULL, id))
			continue;

		value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
		if (!riscv_cpufeature_patch_check(id, value))
			continue;

		oldptr = ALT_OLD_PTR(alt);
		altptr = ALT_ALT_PTR(alt);

		mutex_lock(&text_mutex);
		patch_text_nosync(oldptr, altptr, alt->alt_len);
		riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr);
		mutex_unlock(&text_mutex);
	}
}
#endif