xref: /linux/arch/riscv/include/asm/cpufeature.h (revision 71dfa617ea9f18e4585fe78364217cd32b1fc382)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright 2022-2024 Rivos, Inc
4  */
5 
6 #ifndef _ASM_CPUFEATURE_H
7 #define _ASM_CPUFEATURE_H
8 
9 #include <linux/bitmap.h>
10 #include <linux/jump_label.h>
11 #include <asm/hwcap.h>
12 #include <asm/alternative-macros.h>
13 #include <asm/errno.h>
14 
15 /*
16  * These are probed via a device_initcall(), via either the SBI or directly
17  * from the corresponding CSRs.
18  */
struct riscv_cpuinfo {
	unsigned long mvendorid;	/* vendor ID (mvendorid CSR / SBI) */
	unsigned long marchid;		/* microarchitecture ID (marchid CSR / SBI) */
	unsigned long mimpid;		/* implementation ID (mimpid CSR / SBI) */
};
24 
/* Bitmap of ISA extensions supported by one hart, indexed by RISCV_ISA_EXT_* id. */
struct riscv_isainfo {
	DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
};
28 
29 DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
30 
31 /* Per-cpu ISA extensions. */
32 extern struct riscv_isainfo hart_isa[NR_CPUS];
33 
34 void riscv_user_isa_enable(void);
35 
#if defined(CONFIG_RISCV_MISALIGNED)
bool check_unaligned_access_emulated_all_cpus(void);
void unaligned_emulation_finish(void);
bool unaligned_ctl_available(void);
/* Per-CPU misaligned-access speed, filled in during emulation probing. */
DECLARE_PER_CPU(long, misaligned_access_speed);
#else
/* Without CONFIG_RISCV_MISALIGNED the unaligned-access control is never offered. */
static inline bool unaligned_ctl_available(void)
{
	return false;
}
#endif
47 
48 #if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
49 DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
50 
51 static __always_inline bool has_fast_unaligned_accesses(void)
52 {
53 	return static_branch_likely(&fast_unaligned_access_speed_key);
54 }
55 #else
56 static __always_inline bool has_fast_unaligned_accesses(void)
57 {
58 	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
59 		return true;
60 	else
61 		return false;
62 }
63 #endif
64 
65 unsigned long riscv_get_elf_hwcap(void);
66 
67 struct riscv_isa_ext_data {
68 	const unsigned int id;
69 	const char *name;
70 	const char *property;
71 	const unsigned int *subset_ext_ids;
72 	const unsigned int subset_ext_size;
73 };
74 
75 extern const struct riscv_isa_ext_data riscv_isa_ext[];
76 extern const size_t riscv_isa_ext_count;
77 extern bool riscv_isa_fallback;
78 
79 unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
80 
81 bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit);
82 #define riscv_isa_extension_available(isa_bitmap, ext)	\
83 	__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
84 
/*
 * Test whether the system supports extension @ext, optimized for the case
 * where it does.  With CONFIG_RISCV_ALTERNATIVE the check is patched at
 * alternatives-apply time: the "j l_no" is replaced by a nop when the
 * extension (errata id == @ext) is present, so the common case costs one
 * nop.  Otherwise fall back to a bitmap lookup (NULL selects the default
 * system-wide ISA bitmap — see __riscv_isa_extension_available()).
 *
 * @ext must be a compile-time constant < RISCV_ISA_EXT_MAX (enforced below).
 */
static __always_inline bool
riscv_has_extension_likely(const unsigned long ext)
{
	compiletime_assert(ext < RISCV_ISA_EXT_MAX,
			   "ext must be < RISCV_ISA_EXT_MAX");

	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
		asm goto(
		ALTERNATIVE("j	%l[l_no]", "nop", 0, %[ext], 1)
		:
		: [ext] "i" (ext)
		:
		: l_no);
	} else {
		if (!__riscv_isa_extension_available(NULL, ext))
			goto l_no;
	}

	return true;
l_no:
	return false;
}
107 
/*
 * Counterpart of riscv_has_extension_likely(), optimized for the case where
 * the extension is absent: the default code is a nop, and a "j l_yes" is
 * patched in only when the extension is detected.  Same fallback bitmap
 * lookup when alternatives are not configured.
 *
 * @ext must be a compile-time constant < RISCV_ISA_EXT_MAX (enforced below).
 */
static __always_inline bool
riscv_has_extension_unlikely(const unsigned long ext)
{
	compiletime_assert(ext < RISCV_ISA_EXT_MAX,
			   "ext must be < RISCV_ISA_EXT_MAX");

	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
		asm goto(
		ALTERNATIVE("nop", "j	%l[l_yes]", 0, %[ext], 1)
		:
		: [ext] "i" (ext)
		:
		: l_yes);
	} else {
		if (__riscv_isa_extension_available(NULL, ext))
			goto l_yes;
	}

	return false;
l_yes:
	return true;
}
130 
131 static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
132 {
133 	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
134 		return true;
135 
136 	return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
137 }
138 
139 static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
140 {
141 	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
142 		return true;
143 
144 	return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
145 }
146 
147 #endif
148