/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
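
/*
 * A sketch of how these offsets compose, per the ARMv8 vector table
 * layout: the vector an exception lands on is VBAR_ELx plus a base
 * offset selecting the source (current EL with SP_EL0, current EL with
 * SP_ELx, lower EL in AArch64, lower EL in AArch32) plus the exception
 * type offset above. For example, an IRQ taken from a lower AArch64 EL
 * vectors to VBAR_ELx + 0x400 + 0x80 = VBAR_ELx + 0x480.
 */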

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

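	/*
	 * A plausible reading of the TID4/TID2 choice below: TID4
	 * (FEAT_EVT) only traps the CCSIDRx/CLIDR/CSSELR group, while
	 * TID2 additionally traps CTR_EL0. With mismatched cache types
	 * across CPUs, CTR_EL0 accesses need trapping too, so the wider
	 * TID2 trap is used in that case.
	 */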
	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}
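
/*
 * Illustrative note: trap handlers for conditional AArch32 instructions
 * typically bail out and simply skip the instruction when this returns
 * false, along the lines of (a sketch, not a verbatim call site):
 *
 *	if (!kvm_condition_valid(vcpu)) {
 *		kvm_incr_pc(vcpu);
 *		return 1;
 *	}
 */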

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
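
/*
 * Register 31 encodes XZR/WZR in the ESR_EL2 ISS, i.e. reads-as-zero,
 * writes-ignored, which both helpers honour. A typical (sketched) MMIO
 * read completion would feed the decoded transfer register straight in:
 *
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 */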

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2, or
	 * if both the E2H and TGE bits are set. The latter means we
	 * are in the userspace of the VHE kernel; the ARMv8.1 ARM
	 * describes this as 'InHost'.
	 *
	 * Note that HCR_EL2.{E2H,TGE}={0,1} isn't really handled in
	 * the rest of the KVM code, and will result in a misbehaving
	 * guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
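
/*
 * Worked example, following the table above: an AArch64 view with only
 * DIT set, spsr = BIT(24), comes back as BIT(21) -- DIT relocated to
 * its AArch32 position, with the old bit 24 (J, RES0) cleared.
 */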

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
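
/*
 * HPFAR_EL2.FIPA holds the faulting IPA shifted right by 12 and placed
 * at bit 4, so masking and shifting left by 8 rebuilds the IPA with a
 * zero page offset. The offset within the page, when needed, has to
 * come from FAR_EL2 (kvm_vcpu_get_hfar()).
 */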

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
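
/*
 * ESR_ELx.SAS encodes the access size as a power of two: 0b00 byte,
 * 0b01 halfword, 0b10 word, 0b11 doubleword. 1 << SAS maps this to a
 * size in bytes, e.g. SAS = 0b10 yields a 4-byte access.
 */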

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
{
	unsigned long esr = kvm_vcpu_get_esr(vcpu);

	BUG_ON(!esr_fsc_is_permission_fault(esr));
	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW0:
	case ESR_ELx_FSC_SEA_TTW1:
	case ESR_ELx_FSC_SEA_TTW2:
	case ESR_ELx_FSC_SEA_TTW3:
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW0:
	case ESR_ELx_FSC_SECC_TTW1:
	case ESR_ELx_FSC_SECC_TTW2:
	case ESR_ELx_FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		return kvm_vcpu_trap_is_permission_fault(vcpu);
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
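
/*
 * Worked example for the conversion helpers: assume a little-endian
 * host and device bytes {0x11, 0x22, 0x33, 0x44} memcpy'd into 'data'
 * (so data = 0x44332211). For len = 4, vcpu_data_host_to_guest() hands
 * a big-endian vcpu cpu_to_be32(data) = 0x11223344 and a little-endian
 * vcpu cpu_to_le32(data) = 0x44332211 -- each guest sees the bytes in
 * its own byte order. vcpu_data_guest_to_host() is the mirror image,
 * used when emulating guest stores.
 */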

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)
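
/*
 * INCREMENT_PC and PENDING_EXCEPTION are deliberately exclusive (hence
 * the WARN_ONs above): an exit either steps past the trapped
 * instruction or queues an exception, never both. A typical injection
 * site looks something like:
 *
 *	kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
 */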

static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}
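
/*
 * With HCR_EL2.E2H set (VHE/hVHE), CPTR_EL2 takes the CPACR_EL1 format
 * and EL2 accesses to CPACR_EL1 are redirected to it, which is why the
 * VHE path above writes cpacr_el1. The nVHE path uses the legacy
 * CPTR_EL2 layout directly.
 */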

static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);

		if (!vcpu_has_sve(vcpu) ||
		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (vcpu_has_sve(vcpu) &&
		    (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
			val |= CPTR_EL2_TZ;
		if (cpus_have_final_cap(ARM64_SME))
			val &= ~CPTR_EL2_TSM;
	}

	return val;
}

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	kvm_write_cptr_el2(val);
}
#endif /* __ARM64_KVM_EMULATE_H__ */