// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 - Columbia University and Linaro Ltd.
 * Author: Jintack Lim <jintack.lim@linaro.org>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/sysreg.h>

#include "sys_regs.h"

/* Protection against the sysreg repainting madness... */
#define NV_FTR(r, f)		ID_AA64##r##_EL1_##f
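/* e.g. NV_FTR(ISAR0, TME) expands to ID_AA64ISAR0_EL1_TME, the field mask */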

/*
 * Our emulated CPU doesn't support all the possible features. For the
 * sake of simplicity (and probably mental sanity), wipe out a number
 * of feature bits we don't intend to support for the time being.
 * This list should get updated as new features get added to the NV
 * support, and as new extensions are added to the architecture.
 */
static u64 limit_nv_id_reg(u32 id, u64 val)
{
	u64 tmp;

	switch (id) {
	case SYS_ID_AA64ISAR0_EL1:
		/* Support everything but TME, Outer Shareable and Range TLBIs */
		val &= ~(NV_FTR(ISAR0, TLB)		|
			 NV_FTR(ISAR0, TME));
		break;

	case SYS_ID_AA64ISAR1_EL1:
		/* Support everything but PtrAuth and Spec Invalidation */
		val &= ~(GENMASK_ULL(63, 56)	|
			 NV_FTR(ISAR1, SPECRES)	|
			 NV_FTR(ISAR1, GPI)	|
			 NV_FTR(ISAR1, GPA)	|
			 NV_FTR(ISAR1, API)	|
			 NV_FTR(ISAR1, APA));
		break;

	case SYS_ID_AA64PFR0_EL1:
		/* No AMU, MPAM, S-EL2, RAS or SVE */
		val &= ~(GENMASK_ULL(55, 52)	|
			 NV_FTR(PFR0, AMU)	|
			 NV_FTR(PFR0, MPAM)	|
			 NV_FTR(PFR0, SEL2)	|
			 NV_FTR(PFR0, RAS)	|
			 NV_FTR(PFR0, SVE)	|
			 NV_FTR(PFR0, EL3)	|
			 NV_FTR(PFR0, EL2)	|
			 NV_FTR(PFR0, EL1));
		/* 64bit EL1/EL2/EL3 only */
		val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
		val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
		val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);
		break;

	case SYS_ID_AA64PFR1_EL1:
		/* Only support SSBS */
		val &= NV_FTR(PFR1, SSBS);
		break;

	case SYS_ID_AA64MMFR0_EL1:
		/* Hide ECV, ExS, Secure Memory */
		val &= ~(NV_FTR(MMFR0, ECV)		|
			 NV_FTR(MMFR0, EXS)		|
			 NV_FTR(MMFR0, TGRAN4_2)	|
			 NV_FTR(MMFR0, TGRAN16_2)	|
			 NV_FTR(MMFR0, TGRAN64_2)	|
			 NV_FTR(MMFR0, SNSMEM));

		/* Disallow unsupported S2 page sizes */
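		/*
		 * TGRANx_2 encodings: 0b0001 means the granule is not
		 * supported at stage 2, 0b0010 means it is.
		 */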
		switch (PAGE_SIZE) {
		case SZ_64K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0001);
			fallthrough;
		case SZ_16K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0001);
			fallthrough;
		case SZ_4K:
			/* Support everything */
			break;
		}
		/*
		 * Since we can't support a guest S2 page size smaller than
		 * the host's own page size (due to KVM only populating its
		 * own S2 using the kernel's page size), advertise the
		 * limitation using FEAT_GTG.
		 */
		switch (PAGE_SIZE) {
		case SZ_4K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN4_2), 0b0010);
			fallthrough;
		case SZ_16K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN16_2), 0b0010);
			fallthrough;
		case SZ_64K:
			val |= FIELD_PREP(NV_FTR(MMFR0, TGRAN64_2), 0b0010);
			break;
		}
		/* Cap PARange to 48bits */
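		/* (0b0101 is the PARange encoding for a 48bit PA) */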
		tmp = FIELD_GET(NV_FTR(MMFR0, PARANGE), val);
		if (tmp > 0b0101) {
			val &= ~NV_FTR(MMFR0, PARANGE);
			val |= FIELD_PREP(NV_FTR(MMFR0, PARANGE), 0b0101);
		}
		break;

	case SYS_ID_AA64MMFR1_EL1:
		val &= (NV_FTR(MMFR1, HCX)	|
			NV_FTR(MMFR1, PAN)	|
			NV_FTR(MMFR1, LO)	|
			NV_FTR(MMFR1, HPDS)	|
			NV_FTR(MMFR1, VH)	|
			NV_FTR(MMFR1, VMIDBits));
		break;

	case SYS_ID_AA64MMFR2_EL1:
		val &= ~(NV_FTR(MMFR2, BBM)	|
			 NV_FTR(MMFR2, TTL)	|
			 GENMASK_ULL(47, 44)	|
			 NV_FTR(MMFR2, ST)	|
			 NV_FTR(MMFR2, CCIDX)	|
			 NV_FTR(MMFR2, VARange));

		/* Force TTL support */
		val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
		break;

	case SYS_ID_AA64MMFR4_EL1:
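		/*
		 * If the host has no working HCR_EL2.NV1, advertise E2H0
		 * as NI_NV1: the guest's HCR_EL2.E2H is RES1 and NV1 is
		 * RES0.
		 */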
		val = 0;
		if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
			val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
					  ID_AA64MMFR4_EL1_E2H0_NI_NV1);
		break;

	case SYS_ID_AA64DFR0_EL1:
		/* Only limited support for PMU, Debug, BPs and WPs */
		val &= (NV_FTR(DFR0, PMUVer)	|
			NV_FTR(DFR0, WRPs)	|
			NV_FTR(DFR0, BRPs)	|
			NV_FTR(DFR0, DebugVer));

		/* Cap Debug to ARMv8.1 */
		tmp = FIELD_GET(NV_FTR(DFR0, DebugVer), val);
		if (tmp > 0b0111) {
			val &= ~NV_FTR(DFR0, DebugVer);
			val |= FIELD_PREP(NV_FTR(DFR0, DebugVer), 0b0111);
		}
		break;

	default:
		/* Unknown register, just wipe it clean */
		val = 0;
		break;
	}

	return val;
}

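/*
 * Sanitise the in-memory view of a VNCR-backed EL2 register: bits that
 * are RES0 for this VM read as 0, bits that are RES1 read as 1. Until
 * the masks have been allocated, the raw value is returned.
 */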
u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
{
	u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
	struct kvm_sysreg_masks *masks;

	masks = vcpu->kvm->arch.sysreg_masks;

	if (masks) {
		sr -= __VNCR_START__;

		v &= ~masks->mask[sr].res0;
		v |= masks->mask[sr].res1;
	}

	return v;
}

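/*
 * Record the RES0/RES1 behaviour of a VNCR-backed register, to be
 * enforced by kvm_vcpu_sanitise_vncr_reg(). Requires that
 * kvm->arch.sysreg_masks has already been allocated.
 */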
static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
{
	int i = sr - __VNCR_START__;

	kvm->arch.sysreg_masks->mask[i].res0 = res0;
	kvm->arch.sysreg_masks->mask[i].res1 = res1;
}

int kvm_init_nv_sysregs(struct kvm *kvm)
{
	u64 res0, res1;
	int ret = 0;

	mutex_lock(&kvm->arch.config_lock);

	if (kvm->arch.sysreg_masks)
		goto out;

	kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
					 GFP_KERNEL);
	if (!kvm->arch.sysreg_masks) {
		ret = -ENOMEM;
		goto out;
	}

	for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
		kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
						       kvm->arch.id_regs[i]);

	/* VTTBR_EL2 */
	res0 = res1 = 0;
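	/* Without 16bit VMIDs, the top 8 bits of the VMID are RES0 */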
	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
		res0 |= GENMASK(63, 56);
	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
		res0 |= VTTBR_CNP_BIT;
	set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);

	/* VTCR_EL2 */
	res0 = GENMASK(63, 32) | GENMASK(30, 20);
	res1 = BIT(31);
	set_sysreg_masks(kvm, VTCR_EL2, res0, res1);

	/* VMPIDR_EL2 */
	res0 = GENMASK(63, 40) | GENMASK(30, 24);
	res1 = BIT(31);
	set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);

	/* HCR_EL2 */
	res0 = BIT(48);
	res1 = HCR_RW;
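	/* RW is RES1: only AArch64 EL1 is advertised, see limit_nv_id_reg() */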
	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
		res0 |= GENMASK(63, 59);
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
		res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
		res0 |= (HCR_TTLBIS | HCR_TTLBOS);
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
	    !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
		res0 |= HCR_ENSCXT;
	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
		res0 |= (HCR_TOCU | HCR_TICAB | HCR_TID4);
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
		res0 |= HCR_AMVOFFEN;
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
		res0 |= HCR_FIEN;
	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
		res0 |= HCR_FWB;
	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
		res0 |= HCR_NV2;
	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
		res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
	if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
	      __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
		res0 |= (HCR_API | HCR_APK);
	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
		res0 |= BIT(39);
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
		res0 |= (HCR_TEA | HCR_TERR);
	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
		res0 |= HCR_TLOR;
	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
		res1 |= HCR_E2H;
	set_sysreg_masks(kvm, HCR_EL2, res0, res1);

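	/*
	 * Same scheme for the registers below: start from the
	 * architected RES0/RES1 bits, then turn the controls of any
	 * feature this VM doesn't implement into extra res0 bits.
	 */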
	/* HCRX_EL2 */
	res0 = HCRX_EL2_RES0;
	res1 = HCRX_EL2_RES1;
	if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
		res0 |= HCRX_EL2_PACMEn;
	if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
		res0 |= HCRX_EL2_EnFPM;
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
		res0 |= HCRX_EL2_GCSEn;
	if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP))
		res0 |= HCRX_EL2_EnIDCP128;
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC))
		res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR);
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP))
		res0 |= HCRX_EL2_TMEA;
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
		res0 |= HCRX_EL2_D128En;
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
		res0 |= HCRX_EL2_PTTWI;
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
		res0 |= HCRX_EL2_SCTLR2En;
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
		res0 |= HCRX_EL2_TCR2En;
	if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
		res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP))
		res0 |= HCRX_EL2_CMOW;
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
		res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT);
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ||
	    !(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS))
		res0 |= HCRX_EL2_SMPME;
	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
		res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS);
	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
		res0 |= HCRX_EL2_EnASR;
	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
		res0 |= HCRX_EL2_EnALS;
	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
		res0 |= HCRX_EL2_EnAS0;
	set_sysreg_masks(kvm, HCRX_EL2, res0, res1);

	/* HFG[RW]TR_EL2 */
	res0 = res1 = 0;
	if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
	      __vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
		res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey |
			 HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey |
			 HFGxTR_EL2_APIBKey);
	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
		res0 |= (HFGxTR_EL2_LORC_EL1 | HFGxTR_EL2_LOREA_EL1 |
			 HFGxTR_EL2_LORID_EL1 | HFGxTR_EL2_LORN_EL1 |
			 HFGxTR_EL2_LORSA_EL1);
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
	    !kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
		res0 |= (HFGxTR_EL2_SCXTNUM_EL1 | HFGxTR_EL2_SCXTNUM_EL0);
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP))
		res0 |= HFGxTR_EL2_ICC_IGRPENn_EL1;
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
		res0 |= (HFGxTR_EL2_ERRIDR_EL1 | HFGxTR_EL2_ERRSELR_EL1 |
			 HFGxTR_EL2_ERXFR_EL1 | HFGxTR_EL2_ERXCTLR_EL1 |
			 HFGxTR_EL2_ERXSTATUS_EL1 | HFGxTR_EL2_ERXMISCn_EL1 |
			 HFGxTR_EL2_ERXPFGF_EL1 | HFGxTR_EL2_ERXPFGCTL_EL1 |
			 HFGxTR_EL2_ERXPFGCDN_EL1 | HFGxTR_EL2_ERXADDR_EL1);
	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
		res0 |= HFGxTR_EL2_nACCDATA_EL1;
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
		res0 |= (HFGxTR_EL2_nGCS_EL0 | HFGxTR_EL2_nGCS_EL1);
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
		res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0);
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
		res0 |= HFGxTR_EL2_nRCWMASK_EL1;
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
		res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1);
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
		res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1);
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
		res0 |= HFGxTR_EL2_nS2POR_EL1;
	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
		res0 |= (HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nAMAIR2_EL1);
	set_sysreg_masks(kvm, HFGRTR_EL2, res0 | __HFGRTR_EL2_RES0, res1);
	set_sysreg_masks(kvm, HFGWTR_EL2, res0 | __HFGWTR_EL2_RES0, res1);

	/* HDFG[RW]TR_EL2 */
	res0 = res1 = 0;
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
		res0 |= HDFGRTR_EL2_OSDLR_EL1;
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
		res0 |= (HDFGRTR_EL2_PMEVCNTRn_EL0 | HDFGRTR_EL2_PMEVTYPERn_EL0 |
			 HDFGRTR_EL2_PMCCFILTR_EL0 | HDFGRTR_EL2_PMCCNTR_EL0 |
			 HDFGRTR_EL2_PMCNTEN | HDFGRTR_EL2_PMINTEN |
			 HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMSELR_EL0 |
			 HDFGRTR_EL2_PMMIR_EL1 | HDFGRTR_EL2_PMUSERENR_EL0 |
			 HDFGRTR_EL2_PMCEIDn_EL0);
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
		res0 |= (HDFGRTR_EL2_PMBLIMITR_EL1 | HDFGRTR_EL2_PMBPTR_EL1 |
			 HDFGRTR_EL2_PMBSR_EL1 | HDFGRTR_EL2_PMSCR_EL1 |
			 HDFGRTR_EL2_PMSEVFR_EL1 | HDFGRTR_EL2_PMSFCR_EL1 |
			 HDFGRTR_EL2_PMSICR_EL1 | HDFGRTR_EL2_PMSIDR_EL1 |
			 HDFGRTR_EL2_PMSIRR_EL1 | HDFGRTR_EL2_PMSLATFR_EL1 |
			 HDFGRTR_EL2_PMBIDR_EL1);
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
		res0 |= (HDFGRTR_EL2_TRC | HDFGRTR_EL2_TRCAUTHSTATUS |
			 HDFGRTR_EL2_TRCAUXCTLR | HDFGRTR_EL2_TRCCLAIM |
			 HDFGRTR_EL2_TRCCNTVRn | HDFGRTR_EL2_TRCID |
			 HDFGRTR_EL2_TRCIMSPECn | HDFGRTR_EL2_TRCOSLSR |
			 HDFGRTR_EL2_TRCPRGCTLR | HDFGRTR_EL2_TRCSEQSTR |
			 HDFGRTR_EL2_TRCSSCSRn | HDFGRTR_EL2_TRCSTATR |
			 HDFGRTR_EL2_TRCVICTLR);
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
		res0 |= (HDFGRTR_EL2_TRBBASER_EL1 | HDFGRTR_EL2_TRBIDR_EL1 |
			 HDFGRTR_EL2_TRBLIMITR_EL1 | HDFGRTR_EL2_TRBMAR_EL1 |
			 HDFGRTR_EL2_TRBPTR_EL1 | HDFGRTR_EL2_TRBSR_EL1 |
			 HDFGRTR_EL2_TRBTRG_EL1);
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
		res0 |= (HDFGRTR_EL2_nBRBIDR | HDFGRTR_EL2_nBRBCTL |
			 HDFGRTR_EL2_nBRBDATA);
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
		res0 |= HDFGRTR_EL2_nPMSNEVFR_EL1;
	set_sysreg_masks(kvm, HDFGRTR_EL2, res0 | HDFGRTR_EL2_RES0, res1);

	/* Reuse the bits from the read-side and add the write-specific stuff */
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
		res0 |= (HDFGWTR_EL2_PMCR_EL0 | HDFGWTR_EL2_PMSWINC_EL0);
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
		res0 |= HDFGWTR_EL2_TRCOSLAR;
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
		res0 |= HDFGWTR_EL2_TRFCR_EL1;
	set_sysreg_masks(kvm, HDFGWTR_EL2, res0 | HDFGWTR_EL2_RES0, res1);

	/* HFGITR_EL2 */
	res0 = HFGITR_EL2_RES0;
	res1 = HFGITR_EL2_RES1;
	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, DPB2))
		res0 |= HFGITR_EL2_DCCVADP;
	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
		res0 |= (HFGITR_EL2_ATS1E1RP | HFGITR_EL2_ATS1E1WP);
	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		res0 |= (HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
			 HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS |
			 HFGITR_EL2_TLBIVAALE1OS | HFGITR_EL2_TLBIVALE1OS |
			 HFGITR_EL2_TLBIVAAE1OS | HFGITR_EL2_TLBIASIDE1OS |
			 HFGITR_EL2_TLBIVAE1OS | HFGITR_EL2_TLBIVMALLE1OS);
	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		res0 |= (HFGITR_EL2_TLBIRVAALE1 | HFGITR_EL2_TLBIRVALE1 |
			 HFGITR_EL2_TLBIRVAAE1 | HFGITR_EL2_TLBIRVAE1 |
			 HFGITR_EL2_TLBIRVAALE1IS | HFGITR_EL2_TLBIRVALE1IS |
			 HFGITR_EL2_TLBIRVAAE1IS | HFGITR_EL2_TLBIRVAE1IS |
			 HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
			 HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS);
	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP))
		res0 |= (HFGITR_EL2_CFPRCTX | HFGITR_EL2_DVPRCTX |
			 HFGITR_EL2_CPPRCTX);
	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
		res0 |= (HFGITR_EL2_nBRBINJ | HFGITR_EL2_nBRBIALL);
	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
		res0 |= (HFGITR_EL2_nGCSPUSHM_EL1 | HFGITR_EL2_nGCSSTR_EL1 |
			 HFGITR_EL2_nGCSEPP);
	if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX))
		res0 |= HFGITR_EL2_COSPRCTX;
	if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
		res0 |= HFGITR_EL2_ATS1E1A;
	set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);

	/* HAFGRTR_EL2 - not a lot to see here */
	res0 = HAFGRTR_EL2_RES0;
	res1 = HAFGRTR_EL2_RES1;
	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
		res0 |= ~(res0 | res1);
	set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
out:
	mutex_unlock(&kvm->arch.config_lock);

	return ret;
}
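
/*
 * Illustrative usage sketch: once the masks above are in place, readers
 * of a VNCR-backed register are expected to go through the sanitiser,
 * e.g.
 *
 *	u64 hcr = kvm_vcpu_sanitise_vncr_reg(vcpu, HCR_EL2);
 *
 * which clears the res0 bits and sets the res1 bits computed in
 * kvm_init_nv_sysregs().
 */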