// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 *
 * While the blessed list should be created from the oldest possible
 * kernel, we can't go older than v5.2, because that's the first release
 * which includes df205b5c6328 ("KVM: arm64: Filter out invalid core
 * register IDs in KVM_GET_REG_LIST"). Without that commit the core
 * registers won't match expectations.
 */
#include <stdio.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

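/*
 * SYS_REG() turns a SYS_<name> system register definition into the 64-bit
 * register ID that KVM_GET_ONE_REG/KVM_SET_ONE_REG expect, by extracting the
 * Op0/Op1/CRn/CRm/Op2 fields of the encoding with the sys_reg_*() helpers
 * and feeding them to ARM64_SYS_REG().
 */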
#define SYS_REG(r) ARM64_SYS_REG(sys_reg_Op0(SYS_ ## r), \
                                 sys_reg_Op1(SYS_ ## r), \
                                 sys_reg_CRn(SYS_ ## r), \
                                 sys_reg_CRm(SYS_ ## r), \
                                 sys_reg_Op2(SYS_ ## r))

struct feature_id_reg {
        __u64 reg;
        __u64 id_reg;
        __u64 feat_shift;
        __u64 feat_min;
};

#define FEAT(id, f, v) \
        .id_reg = SYS_REG(id), \
        .feat_shift = id ## _ ## f ## _SHIFT, \
        .feat_min = id ## _ ## f ## _ ## v

#define REG_FEAT(r, id, f, v) \
        { \
                .reg = SYS_REG(r), \
                FEAT(id, f, v) \
        }

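/*
 * Some registers are only present when a given CPU feature is implemented.
 * Each entry below ties such a register to the ID register field that
 * advertises the feature, e.g. REG_FEAT(TCR2_EL1, ID_AA64MMFR3_EL1, TCRX, IMP)
 * means TCR2_EL1 is only expected when ID_AA64MMFR3_EL1.TCRX is at least IMP.
 */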
static struct feature_id_reg feat_id_regs[] = {
        REG_FEAT(TCR2_EL1, ID_AA64MMFR3_EL1, TCRX, IMP),
        REG_FEAT(TCR2_EL2, ID_AA64MMFR3_EL1, TCRX, IMP),
        REG_FEAT(PIRE0_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
        REG_FEAT(PIRE0_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
        REG_FEAT(PIR_EL1, ID_AA64MMFR3_EL1, S1PIE, IMP),
        REG_FEAT(PIR_EL2, ID_AA64MMFR3_EL1, S1PIE, IMP),
        REG_FEAT(POR_EL1, ID_AA64MMFR3_EL1, S1POE, IMP),
        REG_FEAT(POR_EL0, ID_AA64MMFR3_EL1, S1POE, IMP),
        REG_FEAT(POR_EL2, ID_AA64MMFR3_EL1, S1POE, IMP),
        REG_FEAT(HCRX_EL2, ID_AA64MMFR1_EL1, HCX, IMP),
        REG_FEAT(HFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
        REG_FEAT(HFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
        REG_FEAT(HFGITR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
        REG_FEAT(HDFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
        REG_FEAT(HDFGWTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
        REG_FEAT(HAFGRTR_EL2, ID_AA64MMFR0_EL1, FGT, IMP),
        REG_FEAT(HFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
        REG_FEAT(HFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
        REG_FEAT(HFGITR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
        REG_FEAT(HDFGRTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
        REG_FEAT(HDFGWTR2_EL2, ID_AA64MMFR0_EL1, FGT, FGT2),
        REG_FEAT(ZCR_EL2, ID_AA64PFR0_EL1, SVE, IMP),
        REG_FEAT(SCTLR2_EL1, ID_AA64MMFR3_EL1, SCTLRX, IMP),
        REG_FEAT(SCTLR2_EL2, ID_AA64MMFR3_EL1, SCTLRX, IMP),
        REG_FEAT(VDISR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
        REG_FEAT(VSESR_EL2, ID_AA64PFR0_EL1, RAS, IMP),
        REG_FEAT(VNCR_EL2, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY),
        REG_FEAT(CNTHV_CTL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
        REG_FEAT(CNTHV_CVAL_EL2, ID_AA64MMFR1_EL1, VH, IMP),
        REG_FEAT(ZCR_EL2, ID_AA64PFR0_EL1, SVE, IMP),
};

bool filter_reg(__u64 reg)
{
        /*
         * DEMUX register presence depends on the host's CLIDR_EL1.
         * This means there's no set of them that we can bless.
         */
        if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return true;

        return false;
}

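/*
 * Registers listed in feat_id_regs are only expected when the vCPU actually
 * implements the gating feature: read the ID register, extract the 4-bit
 * field at feat_shift and require it to be at least feat_min. Anything not
 * in the table is assumed to be unconditionally present.
 */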
static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
        int i, ret;
        __u64 data, feat_val;

        for (i = 0; i < ARRAY_SIZE(feat_id_regs); i++) {
                if (feat_id_regs[i].reg == reg) {
                        ret = __vcpu_get_reg(vcpu, feat_id_regs[i].id_reg, &data);
                        if (ret < 0)
                                return false;

                        feat_val = ((data >> feat_id_regs[i].feat_shift) & 0xf);
                        return feat_val >= feat_id_regs[i].feat_min;
                }
        }

        return true;
}

bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
        return check_supported_feat_reg(vcpu, reg);
}

bool check_reject_set(int err)
{
        return err == EPERM;
}

void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
        struct vcpu_reg_sublist *s;
        int feature;

        for_each_sublist(c, s) {
                if (s->finalize) {
                        feature = s->feature;
                        vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
                }
        }
}

#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

#define CORE_REGS_XX_NR_WORDS 2
#define CORE_SPSR_XX_NR_WORDS 2
#define CORE_FPREGS_XX_NR_WORDS 4

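/*
 * Core register IDs encode the offset into struct kvm_regs in 32-bit words,
 * so an X register spans 2 words, an SPSR 2 words, and a 128-bit FP/SIMD
 * vector register 4 words. core_id_to_str() divides by these to recover the
 * array index from the offset.
 */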
static const char *core_id_to_str(const char *prefix, __u64 id)
{
        __u64 core_off = id & ~REG_MASK, idx;

        /*
         * core_off is the offset into struct kvm_regs
         */
        switch (core_off) {
        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
             KVM_REG_ARM_CORE_REG(regs.regs[30]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
                TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx);
                return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
        case KVM_REG_ARM_CORE_REG(regs.sp):
                return "KVM_REG_ARM_CORE_REG(regs.sp)";
        case KVM_REG_ARM_CORE_REG(regs.pc):
                return "KVM_REG_ARM_CORE_REG(regs.pc)";
        case KVM_REG_ARM_CORE_REG(regs.pstate):
                return "KVM_REG_ARM_CORE_REG(regs.pstate)";
        case KVM_REG_ARM_CORE_REG(sp_el1):
                return "KVM_REG_ARM_CORE_REG(sp_el1)";
        case KVM_REG_ARM_CORE_REG(elr_el1):
                return "KVM_REG_ARM_CORE_REG(elr_el1)";
        case KVM_REG_ARM_CORE_REG(spsr[0]) ...
             KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
                TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx);
                return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
                TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx);
                return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
                return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
        }

        TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
        return NULL;
}

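/*
 * SVE register IDs place the slice number in bits [4:0] and the Z/P register
 * number in the bits above that. Only slice 0 is expected here, so the
 * strings below always print a slice of 0.
 */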
static const char *sve_id_to_str(const char *prefix, __u64 id)
{
        __u64 sve_off, n, i;

        if (id == KVM_REG_ARM64_SVE_VLS)
                return "KVM_REG_ARM64_SVE_VLS";

        sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
        i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

        TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id);

        switch (sve_off) {
        case KVM_REG_ARM64_SVE_ZREG_BASE ...
             KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
                n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
                            "%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id);
                return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
        case KVM_REG_ARM64_SVE_PREG_BASE ...
             KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
                n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
                            "%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id);
                return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
        case KVM_REG_ARM64_SVE_FFR_BASE:
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
                            "%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id);
                return "KVM_REG_ARM64_SVE_FFR(0)";
        }

        return NULL;
}

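/*
 * Print a register ID in the same form used by the blessed lists below, so
 * that any new register reported by KVM_GET_REG_LIST can be pasted straight
 * into the relevant table.
 */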
void print_reg(const char *prefix, __u64 id)
{
        unsigned op0, op1, crn, crm, op2;
        const char *reg_size = NULL;

        TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
                    "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id);

        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U8:
                reg_size = "KVM_REG_SIZE_U8";
                break;
        case KVM_REG_SIZE_U16:
                reg_size = "KVM_REG_SIZE_U16";
                break;
        case KVM_REG_SIZE_U32:
                reg_size = "KVM_REG_SIZE_U32";
                break;
        case KVM_REG_SIZE_U64:
                reg_size = "KVM_REG_SIZE_U64";
                break;
        case KVM_REG_SIZE_U128:
                reg_size = "KVM_REG_SIZE_U128";
                break;
        case KVM_REG_SIZE_U256:
                reg_size = "KVM_REG_SIZE_U256";
                break;
        case KVM_REG_SIZE_U512:
                reg_size = "KVM_REG_SIZE_U512";
                break;
        case KVM_REG_SIZE_U1024:
                reg_size = "KVM_REG_SIZE_U1024";
                break;
        case KVM_REG_SIZE_U2048:
                reg_size = "KVM_REG_SIZE_U2048";
                break;
        default:
                TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
                          prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
        }

        switch (id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE:
                printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id));
                break;
        case KVM_REG_ARM_DEMUX:
                TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
                            "%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id);
                printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
                       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
                break;
        case KVM_REG_ARM64_SYSREG:
                op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
                op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
                crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
                crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
                op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
                TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
                            "%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id);
                printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
                break;
        case KVM_REG_ARM_FW:
                TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
                            "%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id);
                printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
                break;
        case KVM_REG_ARM_FW_FEAT_BMAP:
                TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
                            "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id);
                printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
                break;
        case KVM_REG_ARM64_SVE:
                printf("\t%s,\n", sve_id_to_str(prefix, id));
                break;
        default:
                TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
                          prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
        }
}

/*
 * The original blessed list was primed with the output of kernel version
 * v4.15 with --core-reg-fixup and then later updated with new registers.
 * (The --core-reg-fixup option and its fixup function have been removed
 * from the test, as it's unlikely this kind of test will be run on a
 * kernel older than v5.2.)
 *
 * The blessed list is up to date with kernel version v6.4 (or so we hope).
 */
static __u64 base_regs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
        KVM_REG_ARM_FW_REG(0), /* KVM_REG_ARM_PSCI_VERSION */
        KVM_REG_ARM_FW_REG(1), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
        KVM_REG_ARM_FW_REG(2), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
        KVM_REG_ARM_FW_REG(3), /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
        KVM_REG_ARM_FW_FEAT_BMAP_REG(0), /* KVM_REG_ARM_STD_BMAP */
        KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */
        KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */
        KVM_REG_ARM_FW_FEAT_BMAP_REG(3), /* KVM_REG_ARM_VENDOR_HYP_BMAP_2 */

        /*
         * EL0 Virtual Timer Registers
         *
         * WARNING:
         * KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined
         * with the appropriate register encodings. Their values have been
         * accidentally swapped. As this is set API, the definitions here
         * must be used, rather than ones derived from the encodings.
         */
        KVM_ARM64_SYS_REG(SYS_CNTV_CTL_EL0),
        KVM_REG_ARM_TIMER_CVAL,
        KVM_REG_ARM_TIMER_CNT,

        ARM64_SYS_REG(3, 0, 0, 0, 0), /* MIDR_EL1 */
        ARM64_SYS_REG(3, 0, 0, 0, 6), /* REVIDR_EL1 */
        ARM64_SYS_REG(3, 1, 0, 0, 1), /* CLIDR_EL1 */
        ARM64_SYS_REG(3, 1, 0, 0, 7), /* AIDR_EL1 */
        ARM64_SYS_REG(3, 3, 0, 0, 1), /* CTR_EL0 */
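        /*
         * Debug registers: the uncommented op0 == 2 entries below are
         * DBGBVR<n>_EL1, DBGBCR<n>_EL1, DBGWVR<n>_EL1 and DBGWCR<n>_EL1
         * (op2 4..7) for n = 0..15.
         */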
        ARM64_SYS_REG(2, 0, 0, 0, 4),
        ARM64_SYS_REG(2, 0, 0, 0, 5),
        ARM64_SYS_REG(2, 0, 0, 0, 6),
        ARM64_SYS_REG(2, 0, 0, 0, 7),
        ARM64_SYS_REG(2, 0, 0, 1, 4),
        ARM64_SYS_REG(2, 0, 0, 1, 5),
        ARM64_SYS_REG(2, 0, 0, 1, 6),
        ARM64_SYS_REG(2, 0, 0, 1, 7),
        ARM64_SYS_REG(2, 0, 0, 2, 0), /* MDCCINT_EL1 */
        ARM64_SYS_REG(2, 0, 0, 2, 2), /* MDSCR_EL1 */
        ARM64_SYS_REG(2, 0, 0, 2, 4),
        ARM64_SYS_REG(2, 0, 0, 2, 5),
        ARM64_SYS_REG(2, 0, 0, 2, 6),
        ARM64_SYS_REG(2, 0, 0, 2, 7),
        ARM64_SYS_REG(2, 0, 0, 3, 4),
        ARM64_SYS_REG(2, 0, 0, 3, 5),
        ARM64_SYS_REG(2, 0, 0, 3, 6),
        ARM64_SYS_REG(2, 0, 0, 3, 7),
        ARM64_SYS_REG(2, 0, 0, 4, 4),
        ARM64_SYS_REG(2, 0, 0, 4, 5),
        ARM64_SYS_REG(2, 0, 0, 4, 6),
        ARM64_SYS_REG(2, 0, 0, 4, 7),
        ARM64_SYS_REG(2, 0, 0, 5, 4),
        ARM64_SYS_REG(2, 0, 0, 5, 5),
        ARM64_SYS_REG(2, 0, 0, 5, 6),
        ARM64_SYS_REG(2, 0, 0, 5, 7),
        ARM64_SYS_REG(2, 0, 0, 6, 4),
        ARM64_SYS_REG(2, 0, 0, 6, 5),
        ARM64_SYS_REG(2, 0, 0, 6, 6),
        ARM64_SYS_REG(2, 0, 0, 6, 7),
        ARM64_SYS_REG(2, 0, 0, 7, 4),
        ARM64_SYS_REG(2, 0, 0, 7, 5),
        ARM64_SYS_REG(2, 0, 0, 7, 6),
        ARM64_SYS_REG(2, 0, 0, 7, 7),
        ARM64_SYS_REG(2, 0, 0, 8, 4),
        ARM64_SYS_REG(2, 0, 0, 8, 5),
        ARM64_SYS_REG(2, 0, 0, 8, 6),
        ARM64_SYS_REG(2, 0, 0, 8, 7),
        ARM64_SYS_REG(2, 0, 0, 9, 4),
        ARM64_SYS_REG(2, 0, 0, 9, 5),
        ARM64_SYS_REG(2, 0, 0, 9, 6),
        ARM64_SYS_REG(2, 0, 0, 9, 7),
        ARM64_SYS_REG(2, 0, 0, 10, 4),
        ARM64_SYS_REG(2, 0, 0, 10, 5),
        ARM64_SYS_REG(2, 0, 0, 10, 6),
        ARM64_SYS_REG(2, 0, 0, 10, 7),
        ARM64_SYS_REG(2, 0, 0, 11, 4),
        ARM64_SYS_REG(2, 0, 0, 11, 5),
        ARM64_SYS_REG(2, 0, 0, 11, 6),
        ARM64_SYS_REG(2, 0, 0, 11, 7),
        ARM64_SYS_REG(2, 0, 0, 12, 4),
        ARM64_SYS_REG(2, 0, 0, 12, 5),
        ARM64_SYS_REG(2, 0, 0, 12, 6),
        ARM64_SYS_REG(2, 0, 0, 12, 7),
        ARM64_SYS_REG(2, 0, 0, 13, 4),
        ARM64_SYS_REG(2, 0, 0, 13, 5),
        ARM64_SYS_REG(2, 0, 0, 13, 6),
        ARM64_SYS_REG(2, 0, 0, 13, 7),
        ARM64_SYS_REG(2, 0, 0, 14, 4),
        ARM64_SYS_REG(2, 0, 0, 14, 5),
        ARM64_SYS_REG(2, 0, 0, 14, 6),
        ARM64_SYS_REG(2, 0, 0, 14, 7),
        ARM64_SYS_REG(2, 0, 0, 15, 4),
        ARM64_SYS_REG(2, 0, 0, 15, 5),
        ARM64_SYS_REG(2, 0, 0, 15, 6),
        ARM64_SYS_REG(2, 0, 0, 15, 7),
        ARM64_SYS_REG(2, 0, 1, 1, 4), /* OSLSR_EL1 */
        ARM64_SYS_REG(2, 4, 0, 7, 0), /* DBGVCR32_EL2 */
        ARM64_SYS_REG(3, 0, 0, 0, 5), /* MPIDR_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 0), /* ID_PFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 1), /* ID_PFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 2), /* ID_DFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 3), /* ID_AFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 4), /* ID_MMFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 5), /* ID_MMFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 6), /* ID_MMFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 7), /* ID_MMFR3_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 0), /* ID_ISAR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 1), /* ID_ISAR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 2), /* ID_ISAR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 3), /* ID_ISAR3_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 4), /* ID_ISAR4_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 5), /* ID_ISAR5_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 6), /* ID_MMFR4_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 7), /* ID_ISAR6_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 0), /* MVFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 1), /* MVFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 2), /* MVFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 3),
        ARM64_SYS_REG(3, 0, 0, 3, 4), /* ID_PFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 5), /* ID_DFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 6), /* ID_MMFR5_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 7),
        ARM64_SYS_REG(3, 0, 0, 4, 0), /* ID_AA64PFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 1), /* ID_AA64PFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 2), /* ID_AA64PFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 3),
        ARM64_SYS_REG(3, 0, 0, 4, 4), /* ID_AA64ZFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 5), /* ID_AA64SMFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 6),
        ARM64_SYS_REG(3, 0, 0, 4, 7),
        ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 5, 1), /* ID_AA64DFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 5, 2),
        ARM64_SYS_REG(3, 0, 0, 5, 3),
        ARM64_SYS_REG(3, 0, 0, 5, 4), /* ID_AA64AFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 5, 5), /* ID_AA64AFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 5, 6),
        ARM64_SYS_REG(3, 0, 0, 5, 7),
        ARM64_SYS_REG(3, 0, 0, 6, 0), /* ID_AA64ISAR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 6, 1), /* ID_AA64ISAR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 6, 2), /* ID_AA64ISAR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 6, 3),
        ARM64_SYS_REG(3, 0, 0, 6, 4),
        ARM64_SYS_REG(3, 0, 0, 6, 5),
        ARM64_SYS_REG(3, 0, 0, 6, 6),
        ARM64_SYS_REG(3, 0, 0, 6, 7),
        ARM64_SYS_REG(3, 0, 0, 7, 0), /* ID_AA64MMFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 1), /* ID_AA64MMFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 2), /* ID_AA64MMFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 3), /* ID_AA64MMFR3_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 4), /* ID_AA64MMFR4_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 5),
        ARM64_SYS_REG(3, 0, 0, 7, 6),
        ARM64_SYS_REG(3, 0, 0, 7, 7),
        ARM64_SYS_REG(3, 0, 1, 0, 0), /* SCTLR_EL1 */
        ARM64_SYS_REG(3, 0, 1, 0, 1), /* ACTLR_EL1 */
        ARM64_SYS_REG(3, 0, 1, 0, 2), /* CPACR_EL1 */
        KVM_ARM64_SYS_REG(SYS_SCTLR2_EL1),
        ARM64_SYS_REG(3, 0, 2, 0, 0), /* TTBR0_EL1 */
        ARM64_SYS_REG(3, 0, 2, 0, 1), /* TTBR1_EL1 */
        ARM64_SYS_REG(3, 0, 2, 0, 2), /* TCR_EL1 */
        ARM64_SYS_REG(3, 0, 2, 0, 3), /* TCR2_EL1 */
        ARM64_SYS_REG(3, 0, 5, 1, 0), /* AFSR0_EL1 */
        ARM64_SYS_REG(3, 0, 5, 1, 1), /* AFSR1_EL1 */
        ARM64_SYS_REG(3, 0, 5, 2, 0), /* ESR_EL1 */
        ARM64_SYS_REG(3, 0, 6, 0, 0), /* FAR_EL1 */
        ARM64_SYS_REG(3, 0, 7, 4, 0), /* PAR_EL1 */
        ARM64_SYS_REG(3, 0, 10, 2, 0), /* MAIR_EL1 */
        ARM64_SYS_REG(3, 0, 10, 2, 2), /* PIRE0_EL1 */
        ARM64_SYS_REG(3, 0, 10, 2, 3), /* PIR_EL1 */
        ARM64_SYS_REG(3, 0, 10, 2, 4), /* POR_EL1 */
        ARM64_SYS_REG(3, 0, 10, 3, 0), /* AMAIR_EL1 */
        ARM64_SYS_REG(3, 0, 12, 0, 0), /* VBAR_EL1 */
        ARM64_SYS_REG(3, 0, 12, 1, 1), /* DISR_EL1 */
        ARM64_SYS_REG(3, 0, 13, 0, 1), /* CONTEXTIDR_EL1 */
        ARM64_SYS_REG(3, 0, 13, 0, 4), /* TPIDR_EL1 */
        ARM64_SYS_REG(3, 0, 14, 1, 0), /* CNTKCTL_EL1 */
        ARM64_SYS_REG(3, 2, 0, 0, 0), /* CSSELR_EL1 */
        ARM64_SYS_REG(3, 3, 10, 2, 4), /* POR_EL0 */
        ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */
        ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */
        ARM64_SYS_REG(3, 3, 14, 0, 1), /* CNTPCT_EL0 */
        ARM64_SYS_REG(3, 3, 14, 2, 1), /* CNTP_CTL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 2, 2), /* CNTP_CVAL_EL0 */
        ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
        ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
        ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
};

static __u64 pmu_regs[] = {
        ARM64_SYS_REG(3, 0, 9, 14, 1), /* PMINTENSET_EL1 */
        ARM64_SYS_REG(3, 0, 9, 14, 2), /* PMINTENCLR_EL1 */
        ARM64_SYS_REG(3, 3, 9, 12, 0), /* PMCR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 1), /* PMCNTENSET_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 2), /* PMCNTENCLR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 3), /* PMOVSCLR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 4), /* PMSWINC_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 5), /* PMSELR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 13, 0), /* PMCCNTR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 14, 0), /* PMUSERENR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 14, 3), /* PMOVSSET_EL0 */
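        /*
         * PMU event counter and type registers: PMEVCNTR<n>_EL0 (CRm 8..11)
         * and PMEVTYPER<n>_EL0 (CRm 12..15) for n = 0..30, ending with
         * PMCCFILTR_EL0.
         */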
        ARM64_SYS_REG(3, 3, 14, 8, 0),
        ARM64_SYS_REG(3, 3, 14, 8, 1),
        ARM64_SYS_REG(3, 3, 14, 8, 2),
        ARM64_SYS_REG(3, 3, 14, 8, 3),
        ARM64_SYS_REG(3, 3, 14, 8, 4),
        ARM64_SYS_REG(3, 3, 14, 8, 5),
        ARM64_SYS_REG(3, 3, 14, 8, 6),
        ARM64_SYS_REG(3, 3, 14, 8, 7),
        ARM64_SYS_REG(3, 3, 14, 9, 0),
        ARM64_SYS_REG(3, 3, 14, 9, 1),
        ARM64_SYS_REG(3, 3, 14, 9, 2),
        ARM64_SYS_REG(3, 3, 14, 9, 3),
        ARM64_SYS_REG(3, 3, 14, 9, 4),
        ARM64_SYS_REG(3, 3, 14, 9, 5),
        ARM64_SYS_REG(3, 3, 14, 9, 6),
        ARM64_SYS_REG(3, 3, 14, 9, 7),
        ARM64_SYS_REG(3, 3, 14, 10, 0),
        ARM64_SYS_REG(3, 3, 14, 10, 1),
        ARM64_SYS_REG(3, 3, 14, 10, 2),
        ARM64_SYS_REG(3, 3, 14, 10, 3),
        ARM64_SYS_REG(3, 3, 14, 10, 4),
        ARM64_SYS_REG(3, 3, 14, 10, 5),
        ARM64_SYS_REG(3, 3, 14, 10, 6),
        ARM64_SYS_REG(3, 3, 14, 10, 7),
        ARM64_SYS_REG(3, 3, 14, 11, 0),
        ARM64_SYS_REG(3, 3, 14, 11, 1),
        ARM64_SYS_REG(3, 3, 14, 11, 2),
        ARM64_SYS_REG(3, 3, 14, 11, 3),
        ARM64_SYS_REG(3, 3, 14, 11, 4),
        ARM64_SYS_REG(3, 3, 14, 11, 5),
        ARM64_SYS_REG(3, 3, 14, 11, 6),
        ARM64_SYS_REG(3, 3, 14, 12, 0),
        ARM64_SYS_REG(3, 3, 14, 12, 1),
        ARM64_SYS_REG(3, 3, 14, 12, 2),
        ARM64_SYS_REG(3, 3, 14, 12, 3),
        ARM64_SYS_REG(3, 3, 14, 12, 4),
        ARM64_SYS_REG(3, 3, 14, 12, 5),
        ARM64_SYS_REG(3, 3, 14, 12, 6),
        ARM64_SYS_REG(3, 3, 14, 12, 7),
        ARM64_SYS_REG(3, 3, 14, 13, 0),
        ARM64_SYS_REG(3, 3, 14, 13, 1),
        ARM64_SYS_REG(3, 3, 14, 13, 2),
        ARM64_SYS_REG(3, 3, 14, 13, 3),
        ARM64_SYS_REG(3, 3, 14, 13, 4),
        ARM64_SYS_REG(3, 3, 14, 13, 5),
        ARM64_SYS_REG(3, 3, 14, 13, 6),
        ARM64_SYS_REG(3, 3, 14, 13, 7),
        ARM64_SYS_REG(3, 3, 14, 14, 0),
        ARM64_SYS_REG(3, 3, 14, 14, 1),
        ARM64_SYS_REG(3, 3, 14, 14, 2),
        ARM64_SYS_REG(3, 3, 14, 14, 3),
        ARM64_SYS_REG(3, 3, 14, 14, 4),
        ARM64_SYS_REG(3, 3, 14, 14, 5),
        ARM64_SYS_REG(3, 3, 14, 14, 6),
        ARM64_SYS_REG(3, 3, 14, 14, 7),
        ARM64_SYS_REG(3, 3, 14, 15, 0),
        ARM64_SYS_REG(3, 3, 14, 15, 1),
        ARM64_SYS_REG(3, 3, 14, 15, 2),
        ARM64_SYS_REG(3, 3, 14, 15, 3),
        ARM64_SYS_REG(3, 3, 14, 15, 4),
        ARM64_SYS_REG(3, 3, 14, 15, 5),
        ARM64_SYS_REG(3, 3, 14, 15, 6),
        ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */
};

static __u64 vregs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};

static __u64 sve_regs[] = {
        KVM_REG_ARM64_SVE_VLS,
        KVM_REG_ARM64_SVE_ZREG(0, 0),
        KVM_REG_ARM64_SVE_ZREG(1, 0),
        KVM_REG_ARM64_SVE_ZREG(2, 0),
        KVM_REG_ARM64_SVE_ZREG(3, 0),
        KVM_REG_ARM64_SVE_ZREG(4, 0),
        KVM_REG_ARM64_SVE_ZREG(5, 0),
        KVM_REG_ARM64_SVE_ZREG(6, 0),
        KVM_REG_ARM64_SVE_ZREG(7, 0),
        KVM_REG_ARM64_SVE_ZREG(8, 0),
        KVM_REG_ARM64_SVE_ZREG(9, 0),
        KVM_REG_ARM64_SVE_ZREG(10, 0),
        KVM_REG_ARM64_SVE_ZREG(11, 0),
        KVM_REG_ARM64_SVE_ZREG(12, 0),
        KVM_REG_ARM64_SVE_ZREG(13, 0),
        KVM_REG_ARM64_SVE_ZREG(14, 0),
        KVM_REG_ARM64_SVE_ZREG(15, 0),
        KVM_REG_ARM64_SVE_ZREG(16, 0),
        KVM_REG_ARM64_SVE_ZREG(17, 0),
        KVM_REG_ARM64_SVE_ZREG(18, 0),
        KVM_REG_ARM64_SVE_ZREG(19, 0),
        KVM_REG_ARM64_SVE_ZREG(20, 0),
        KVM_REG_ARM64_SVE_ZREG(21, 0),
        KVM_REG_ARM64_SVE_ZREG(22, 0),
        KVM_REG_ARM64_SVE_ZREG(23, 0),
        KVM_REG_ARM64_SVE_ZREG(24, 0),
        KVM_REG_ARM64_SVE_ZREG(25, 0),
        KVM_REG_ARM64_SVE_ZREG(26, 0),
        KVM_REG_ARM64_SVE_ZREG(27, 0),
        KVM_REG_ARM64_SVE_ZREG(28, 0),
        KVM_REG_ARM64_SVE_ZREG(29, 0),
        KVM_REG_ARM64_SVE_ZREG(30, 0),
        KVM_REG_ARM64_SVE_ZREG(31, 0),
        KVM_REG_ARM64_SVE_PREG(0, 0),
        KVM_REG_ARM64_SVE_PREG(1, 0),
        KVM_REG_ARM64_SVE_PREG(2, 0),
        KVM_REG_ARM64_SVE_PREG(3, 0),
        KVM_REG_ARM64_SVE_PREG(4, 0),
        KVM_REG_ARM64_SVE_PREG(5, 0),
        KVM_REG_ARM64_SVE_PREG(6, 0),
        KVM_REG_ARM64_SVE_PREG(7, 0),
        KVM_REG_ARM64_SVE_PREG(8, 0),
        KVM_REG_ARM64_SVE_PREG(9, 0),
        KVM_REG_ARM64_SVE_PREG(10, 0),
        KVM_REG_ARM64_SVE_PREG(11, 0),
        KVM_REG_ARM64_SVE_PREG(12, 0),
        KVM_REG_ARM64_SVE_PREG(13, 0),
        KVM_REG_ARM64_SVE_PREG(14, 0),
        KVM_REG_ARM64_SVE_PREG(15, 0),
        KVM_REG_ARM64_SVE_FFR(0),
        ARM64_SYS_REG(3, 0, 1, 2, 0), /* ZCR_EL1 */
};

static __u64 sve_rejects_set[] = {
        KVM_REG_ARM64_SVE_VLS,
};

static __u64 pauth_addr_regs[] = {
        ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */
        ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */
        ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */
        ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */
        ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */
        ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */
        ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */
        ARM64_SYS_REG(3, 0, 2, 2, 3)  /* APDBKEYHI_EL1 */
};

static __u64 pauth_generic_regs[] = {
        ARM64_SYS_REG(3, 0, 2, 3, 0), /* APGAKEYLO_EL1 */
        ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */
};

static __u64 el2_regs[] = {
        SYS_REG(VPIDR_EL2),
        SYS_REG(VMPIDR_EL2),
        SYS_REG(SCTLR_EL2),
        SYS_REG(ACTLR_EL2),
        SYS_REG(SCTLR2_EL2),
        SYS_REG(HCR_EL2),
        SYS_REG(MDCR_EL2),
        SYS_REG(CPTR_EL2),
        SYS_REG(HSTR_EL2),
        SYS_REG(HFGRTR_EL2),
        SYS_REG(HFGWTR_EL2),
        SYS_REG(HFGITR_EL2),
        SYS_REG(HACR_EL2),
        SYS_REG(ZCR_EL2),
        SYS_REG(HCRX_EL2),
        SYS_REG(TTBR0_EL2),
        SYS_REG(TTBR1_EL2),
        SYS_REG(TCR_EL2),
        SYS_REG(TCR2_EL2),
        SYS_REG(VTTBR_EL2),
        SYS_REG(VTCR_EL2),
        SYS_REG(VNCR_EL2),
        SYS_REG(HDFGRTR2_EL2),
        SYS_REG(HDFGWTR2_EL2),
        SYS_REG(HFGRTR2_EL2),
        SYS_REG(HFGWTR2_EL2),
        SYS_REG(HDFGRTR_EL2),
        SYS_REG(HDFGWTR_EL2),
        SYS_REG(HAFGRTR_EL2),
        SYS_REG(HFGITR2_EL2),
        SYS_REG(SPSR_EL2),
        SYS_REG(ELR_EL2),
        SYS_REG(AFSR0_EL2),
        SYS_REG(AFSR1_EL2),
        SYS_REG(ESR_EL2),
        SYS_REG(FAR_EL2),
        SYS_REG(HPFAR_EL2),
        SYS_REG(MAIR_EL2),
        SYS_REG(PIRE0_EL2),
        SYS_REG(PIR_EL2),
        SYS_REG(POR_EL2),
        SYS_REG(AMAIR_EL2),
        SYS_REG(VBAR_EL2),
        SYS_REG(CONTEXTIDR_EL2),
        SYS_REG(TPIDR_EL2),
        SYS_REG(CNTVOFF_EL2),
        SYS_REG(CNTHCTL_EL2),
        SYS_REG(CNTHP_CTL_EL2),
        SYS_REG(CNTHP_CVAL_EL2),
        SYS_REG(CNTHV_CTL_EL2),
        SYS_REG(CNTHV_CVAL_EL2),
        SYS_REG(SP_EL2),
        SYS_REG(VDISR_EL2),
        SYS_REG(VSESR_EL2),
};

static __u64 el2_e2h0_regs[] = {
        /* Empty */
};

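/*
 * Each sublist couples an optional KVM capability/vCPU feature with the
 * registers it adds; a sublist with .finalize set additionally requires
 * KVM_ARM_VCPU_FINALIZE (see finalize_vcpu() above), which is the case
 * for SVE.
 */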
#define BASE_SUBLIST \
        { "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
        { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
        { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
          .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
        { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
          .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
          .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST \
        { \
                .name = "pauth_address", \
                .capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
                .feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
                .regs = pauth_addr_regs, \
                .regs_n = ARRAY_SIZE(pauth_addr_regs), \
        }, \
        { \
                .name = "pauth_generic", \
                .capability = KVM_CAP_ARM_PTRAUTH_GENERIC, \
                .feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \
                .regs = pauth_generic_regs, \
                .regs_n = ARRAY_SIZE(pauth_generic_regs), \
        }
#define EL2_SUBLIST \
        { \
                .name = "EL2", \
                .capability = KVM_CAP_ARM_EL2, \
                .feature = KVM_ARM_VCPU_HAS_EL2, \
                .regs = el2_regs, \
                .regs_n = ARRAY_SIZE(el2_regs), \
        }
#define EL2_E2H0_SUBLIST \
        EL2_SUBLIST, \
        { \
                .name = "EL2 E2H0", \
                .capability = KVM_CAP_ARM_EL2_E2H0, \
                .feature = KVM_ARM_VCPU_HAS_EL2_E2H0, \
                .regs = el2_e2h0_regs, \
                .regs_n = ARRAY_SIZE(el2_e2h0_regs), \
        }

static struct vcpu_reg_list vregs_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
        {0},
        },
};
static struct vcpu_reg_list vregs_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};
static struct vcpu_reg_list sve_config = {
        .sublists = {
        BASE_SUBLIST,
        SVE_SUBLIST,
        {0},
        },
};
static struct vcpu_reg_list sve_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        SVE_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};
static struct vcpu_reg_list pauth_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
        PAUTH_SUBLIST,
        {0},
        },
};
static struct vcpu_reg_list pauth_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
        PAUTH_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_vregs_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_SUBLIST,
        VREGS_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_vregs_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_SUBLIST,
        VREGS_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_sve_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_SUBLIST,
        SVE_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_sve_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_SUBLIST,
        SVE_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_pauth_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_SUBLIST,
        VREGS_SUBLIST,
        PAUTH_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_pauth_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_SUBLIST,
        VREGS_SUBLIST,
        PAUTH_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_vregs_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        VREGS_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_vregs_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        VREGS_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_sve_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        SVE_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_sve_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        SVE_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_pauth_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        VREGS_SUBLIST,
        PAUTH_SUBLIST,
        {0},
        },
};

static struct vcpu_reg_list el2_e2h0_pauth_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        EL2_E2H0_SUBLIST,
        VREGS_SUBLIST,
        PAUTH_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

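/*
 * Each config below is run as a separate test case; the registers reported
 * by KVM_GET_REG_LIST for a vCPU created with that config are expected to
 * match the union of its sublists (modulo filter_reg() and the feature
 * checks above).
 */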
struct vcpu_reg_list *vcpu_configs[] = {
        &vregs_config,
        &vregs_pmu_config,
        &sve_config,
        &sve_pmu_config,
        &pauth_config,
        &pauth_pmu_config,

        &el2_vregs_config,
        &el2_vregs_pmu_config,
        &el2_sve_config,
        &el2_sve_pmu_config,
        &el2_pauth_config,
        &el2_pauth_pmu_config,

        &el2_e2h0_vregs_config,
        &el2_e2h0_vregs_pmu_config,
        &el2_e2h0_sve_config,
        &el2_e2h0_sve_pmu_config,
        &el2_e2h0_pauth_config,
        &el2_e2h0_pauth_pmu_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);