1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2012,2013 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 *
6 * Derived from arch/arm/kvm/coproc.h
7 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
8 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
9 */
10
11 #ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
12 #define __ARM64_KVM_SYS_REGS_LOCAL_H__
13
14 #include <linux/bsearch.h>
15
/*
 * Collapse the Op0/Op1/CRn/CRm/Op2 fields of @x (a sys_reg_params or
 * sys_reg_desc pointer) into the canonical sys_reg() encoding.
 * @x is evaluated five times, so it must be free of side effects.
 */
#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
19
/*
 * Decoded form of a single system-register access: the register's
 * encoding plus the value and direction of the access.
 */
struct sys_reg_params {
	u8	Op0;
	u8	Op1;
	u8	CRn;
	u8	CRm;
	u8	Op2;
	u64	regval;		/* value being written, or value to return on a read */
	bool	is_write;	/* true if the guest is writing the register */
};
29
/*
 * Build a sys_reg_params compound literal from a sys_reg() encoding.
 * regval and is_write are left zero-initialized.
 */
#define encoding_to_params(reg)					\
	((struct sys_reg_params){ .Op0 = sys_reg_Op0(reg),	\
				  .Op1 = sys_reg_Op1(reg),	\
				  .CRn = sys_reg_CRn(reg),	\
				  .CRm = sys_reg_CRm(reg),	\
				  .Op2 = sys_reg_Op2(reg) })
36
/*
 * Unpack the ISS of an ESR value for a trapped 64-bit sysreg access:
 * Op0 = bits [21:20], Op2 = bits [19:17], Op1 = bits [16:14],
 * CRn = bits [13:10], CRm = bits [4:1].  Bit 0 is the direction flag;
 * it being clear indicates a write.
 * @esr is evaluated multiple times, so it must be side-effect free.
 */
#define esr_sys64_to_params(esr)                                               \
	((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3,                    \
				  .Op1 = ((esr) >> 14) & 0x7,                  \
				  .CRn = ((esr) >> 10) & 0xf,                  \
				  .CRm = ((esr) >> 1) & 0xf,                   \
				  .Op2 = ((esr) >> 17) & 0x7,                  \
				  .is_write = !((esr) & 1) })
44
/*
 * As esr_sys64_to_params(), but for a trapped 32-bit CP14/CP15 access:
 * same bit layout, except there is no Op0 field (it stays zero).
 * @esr is evaluated multiple times, so it must be side-effect free.
 */
#define esr_cp1x_32_to_params(esr)						\
	((struct sys_reg_params){ .Op1 = ((esr) >> 14) & 0x7,			\
				  .CRn = ((esr) >> 10) & 0xf,			\
				  .CRm = ((esr) >> 1) & 0xf,			\
				  .Op2 = ((esr) >> 17) & 0x7,			\
				  .is_write = !((esr) & 1) })
51
/*
 * Descriptor for a single system register: its encoding, how trapped
 * guest accesses are handled, and how userspace may see it.
 */
struct sys_reg_desc {
	/* Sysreg string for debug */
	const char *name;

	/* How this register maps onto its AArch32 counterpart. */
	enum {
		AA32_DIRECT,
		AA32_LO,	/* NOTE(review): presumably lower half of the 64-bit reg — confirm */
		AA32_HI,	/* NOTE(review): presumably upper half — confirm */
	} aarch32_map;

	/* MRS/MSR instruction which accesses it. */
	u8	Op0;
	u8	Op1;
	u8	CRn;
	u8	CRm;
	u8	Op2;

	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
		       struct sys_reg_params *,
		       const struct sys_reg_desc *);

	/*
	 * Initialization for vcpu. Return initialized value, or KVM
	 * sanitized value for ID registers.
	 */
	u64 (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);

	/* Index into sys_reg[], or 0 if we don't need to save it. */
	int reg;

	/* Value (usually reset value), or write mask for idregs */
	u64 val;

	/* Custom get/set_user functions, fallback to generic if NULL */
	int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			u64 *val);
	int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			u64 val);

	/* Return mask of REG_* runtime visibility overrides */
	unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd);
};
96
/* Flag bits returned by a descriptor's ->visibility() hook. */
#define REG_HIDDEN	(1 << 0) /* hidden from userspace and guest */
#define REG_RAZ		(1 << 1) /* RAZ from userspace and guest */
#define REG_USER_WI	(1 << 2) /* WI from userspace only */
100
101 static __printf(2, 3)
print_sys_reg_msg(const struct sys_reg_params * p,char * fmt,...)102 inline void print_sys_reg_msg(const struct sys_reg_params *p,
103 char *fmt, ...)
104 {
105 va_list va;
106
107 va_start(va, fmt);
108 /* Look, we even formatted it for you to paste into the table! */
109 kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
110 &(struct va_format){ fmt, &va },
111 p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
112 va_end(va);
113 }
114
/* Dump a trapped sysreg access with no extra message attached. */
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
	/* GCC warns on an empty format string, hence the "%s"/"" pair. */
	print_sys_reg_msg(p, "%s", "");
}
120
/*
 * Access handler that silently discards a guest write.
 * Always reports the access as handled.
 */
static inline bool ignore_write(struct kvm_vcpu *vcpu,
				const struct sys_reg_params *p)
{
	return true;
}
126
read_zero(struct kvm_vcpu * vcpu,struct sys_reg_params * p)127 static inline bool read_zero(struct kvm_vcpu *vcpu,
128 struct sys_reg_params *p)
129 {
130 p->regval = 0;
131 return true;
132 }
133
134 /* Reset functions */
reset_unknown(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)135 static inline u64 reset_unknown(struct kvm_vcpu *vcpu,
136 const struct sys_reg_desc *r)
137 {
138 BUG_ON(!r->reg);
139 BUG_ON(r->reg >= NR_SYS_REGS);
140 __vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
141 return __vcpu_sys_reg(vcpu, r->reg);
142 }
143
reset_val(struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)144 static inline u64 reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
145 {
146 BUG_ON(!r->reg);
147 BUG_ON(r->reg >= NR_SYS_REGS);
148 __vcpu_sys_reg(vcpu, r->reg) = r->val;
149 return __vcpu_sys_reg(vcpu, r->reg);
150 }
151
sysreg_visibility(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)152 static inline unsigned int sysreg_visibility(const struct kvm_vcpu *vcpu,
153 const struct sys_reg_desc *r)
154 {
155 if (likely(!r->visibility))
156 return 0;
157
158 return r->visibility(vcpu, r);
159 }
160
sysreg_hidden(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)161 static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
162 const struct sys_reg_desc *r)
163 {
164 return sysreg_visibility(vcpu, r) & REG_HIDDEN;
165 }
166
sysreg_visible_as_raz(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)167 static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
168 const struct sys_reg_desc *r)
169 {
170 return sysreg_visibility(vcpu, r) & REG_RAZ;
171 }
172
sysreg_user_write_ignore(const struct kvm_vcpu * vcpu,const struct sys_reg_desc * r)173 static inline bool sysreg_user_write_ignore(const struct kvm_vcpu *vcpu,
174 const struct sys_reg_desc *r)
175 {
176 return sysreg_visibility(vcpu, r) & REG_USER_WI;
177 }
178
cmp_sys_reg(const struct sys_reg_desc * i1,const struct sys_reg_desc * i2)179 static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
180 const struct sys_reg_desc *i2)
181 {
182 BUG_ON(i1 == i2);
183 if (!i1)
184 return 1;
185 else if (!i2)
186 return -1;
187 if (i1->Op0 != i2->Op0)
188 return i1->Op0 - i2->Op0;
189 if (i1->Op1 != i2->Op1)
190 return i1->Op1 - i2->Op1;
191 if (i1->CRn != i2->CRn)
192 return i1->CRn - i2->CRn;
193 if (i1->CRm != i2->CRm)
194 return i1->CRm - i2->CRm;
195 return i1->Op2 - i2->Op2;
196 }
197
/*
 * bsearch() comparator: @key is the encoding we are looking for
 * (smuggled in as a pointer), @elt the table entry to compare against.
 */
static inline int match_sys_reg(const void *key, const void *elt)
{
	const struct sys_reg_desc *desc = elt;
	const unsigned long target = (unsigned long)key;

	return target - reg_to_encoding(desc);
}
205
206 static inline const struct sys_reg_desc *
find_reg(const struct sys_reg_params * params,const struct sys_reg_desc table[],unsigned int num)207 find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
208 unsigned int num)
209 {
210 unsigned long pval = reg_to_encoding(params);
211
212 return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
213 }
214
/* Lookup by 64-bit register ID (defined outside this header). */
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num);

/* KVM_GET_ONE_REG / KVM_SET_ONE_REG plumbing for sysregs. */
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num);
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num);

bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);

int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu);
229
/* Designated-initializer shorthands for sys_reg_desc table entries. */
#define AA32(_x)	.aarch32_map = AA32_##_x
#define Op0(_x) 	.Op0 = _x
#define Op1(_x) 	.Op1 = _x
#define CRn(_x)		.CRn = _x
#define CRm(_x) 	.CRm = _x
#define Op2(_x) 	.Op2 = _x

/*
 * Fill in the name and full Op0/Op1/CRn/CRm/Op2 encoding of a table
 * entry from a sys_reg() register definition.
 */
#define SYS_DESC(reg)					\
	.name = #reg,					\
	Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)),	\
	CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)),	\
	Op2(sys_reg_Op2(reg))

/*
 * As SYS_DESC(), but for a directly-mapped CP15 register: Op0 is
 * forced to 0 and the AArch32 mapping is AA32_DIRECT.
 */
#define CP15_SYS_DESC(reg)				\
	.name = #reg,					\
	.aarch32_map = AA32_DIRECT,			\
	Op0(0), Op1(sys_reg_Op1(reg)),			\
	CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)),	\
	Op2(sys_reg_Op2(reg))
249
250 #endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */
251