xref: /linux/arch/arm64/kvm/at.c (revision 949d0a46ad1b9ab3450fb6ed69ff1e3e13c657bd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 - Linaro Ltd
4  * Author: Jintack Lim <jintack.lim@linaro.org>
5  */
6 
7 #include <linux/kvm_host.h>
8 
9 #include <asm/esr.h>
10 #include <asm/kvm_hyp.h>
11 #include <asm/kvm_mmu.h>
12 
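/*
 * Record a failed stage-1 walk: stash the fault status code and note
 * whether the fault was taken on the stage-2 translation of a stage-1
 * table walk, which is what the PTW and S bits of PAR_EL1 report.
 */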
13 static void fail_s1_walk(struct s1_walk_result *wr, u8 fst, bool s1ptw)
14 {
15 	wr->fst		= fst;
16 	wr->ptw		= s1ptw;
17 	wr->s2		= s1ptw;
18 	wr->failed	= true;
19 }
20 
21 #define S1_MMU_DISABLED		(-127)
22 
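/* Number of input address bits: 64 - TxSZ (e.g. TxSZ == 16 is a 48-bit IA) */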
23 static int get_ia_size(struct s1_walk_info *wi)
24 {
25 	return 64 - wi->txsz;
26 }
27 
28 /* Return true if the IPA is out of the OA range */
29 static bool check_output_size(u64 ipa, struct s1_walk_info *wi)
30 {
31 	if (wi->pa52bit)
32 		return wi->max_oa_bits < 52 && (ipa & GENMASK_ULL(51, wi->max_oa_bits));
33 	return wi->max_oa_bits < 48 && (ipa & GENMASK_ULL(47, wi->max_oa_bits));
34 }
35 
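/*
 * Is the 52bit PA extension in use for this walk? 64kB granules rely
 * on FEAT_LPA plus a PS/IPS value of 0b0110, while 4kB/16kB granules
 * rely on FEAT_LPA2 plus the TCR_ELx.DS bit.
 */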
36 static bool has_52bit_pa(struct kvm_vcpu *vcpu, struct s1_walk_info *wi, u64 tcr)
37 {
38 	switch (BIT(wi->pgshift)) {
39 	case SZ_64K:
40 	default:		/* IMPDEF: treat any other value as 64k */
41 		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, PARANGE, 52))
42 			return false;
43 		return ((wi->regime == TR_EL2 ?
44 			 FIELD_GET(TCR_EL2_PS_MASK, tcr) :
45 			 FIELD_GET(TCR_IPS_MASK, tcr)) == 0b0110);
46 	case SZ_16K:
47 		if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, TGRAN16, 52_BIT))
48 			return false;
49 		break;
50 	case SZ_4K:
51 		if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, TGRAN4, 52_BIT))
52 			return false;
53 		break;
54 	}
55 
56 	return (tcr & (wi->regime == TR_EL2 ? TCR_EL2_DS : TCR_DS));
57 }
58 
59 static u64 desc_to_oa(struct s1_walk_info *wi, u64 desc)
60 {
61 	u64 addr;
62 
63 	if (!wi->pa52bit)
64 		return desc & GENMASK_ULL(47, wi->pgshift);
65 
66 	switch (BIT(wi->pgshift)) {
67 	case SZ_4K:
68 	case SZ_16K:
69 		addr = desc & GENMASK_ULL(49, wi->pgshift);
70 		addr |= FIELD_GET(KVM_PTE_ADDR_51_50_LPA2, desc) << 50;
71 		break;
72 	case SZ_64K:
73 	default:	    /* IMPDEF: treat any other value as 64k */
74 		addr = desc & GENMASK_ULL(47, wi->pgshift);
75 		addr |= FIELD_GET(KVM_PTE_ADDR_51_48, desc) << 48;
76 		break;
77 	}
78 
79 	return addr;
80 }
81 
82 /* Return the translation regime that applies to an AT instruction */
83 static enum trans_regime compute_translation_regime(struct kvm_vcpu *vcpu, u32 op)
84 {
85 	/*
86 	 * We only get here from guest EL2, so the translation
87 	 * regime that AT applies to is solely defined by {E2H,TGE}.
88 	 */
89 	switch (op) {
90 	case OP_AT_S1E2R:
91 	case OP_AT_S1E2W:
92 	case OP_AT_S1E2A:
93 		return vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
94 	default:
95 		return (vcpu_el2_e2h_is_set(vcpu) &&
96 			vcpu_el2_tge_is_set(vcpu)) ? TR_EL20 : TR_EL10;
97 	}
98 }
99 
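/*
 * Effective TCR2 value for the regime, which reads as 0 for EL1&0
 * when HCRX_EL2.TCR2En is clear.
 */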
100 static u64 effective_tcr2(struct kvm_vcpu *vcpu, enum trans_regime regime)
101 {
102 	if (regime == TR_EL10) {
103 		if (vcpu_has_nv(vcpu) &&
104 		    !(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TCR2En))
105 			return 0;
106 
107 		return vcpu_read_sys_reg(vcpu, TCR2_EL1);
108 	}
109 
110 	return vcpu_read_sys_reg(vcpu, TCR2_EL2);
111 }
112 
113 static bool s1pie_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime)
114 {
115 	if (!kvm_has_s1pie(vcpu->kvm))
116 		return false;
117 
118 	/* Abuse TCR2_EL1_PIE and use it for EL2 as well */
119 	return effective_tcr2(vcpu, regime) & TCR2_EL1_PIE;
120 }
121 
122 static void compute_s1poe(struct kvm_vcpu *vcpu, struct s1_walk_info *wi)
123 {
124 	u64 val;
125 
126 	if (!kvm_has_s1poe(vcpu->kvm)) {
127 		wi->poe = wi->e0poe = false;
128 		return;
129 	}
130 
131 	val = effective_tcr2(vcpu, wi->regime);
132 
133 	/* Abuse TCR2_EL1_* for EL2 */
134 	wi->poe = val & TCR2_EL1_POE;
135 	wi->e0poe = (wi->regime != TR_EL2) && (val & TCR2_EL1_E0POE);
136 }
137 
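/*
 * Decode the translation controls (SCTLR, TCR, TTBR) for the regime
 * and fill in the walk info: granule size, start level, table base
 * address, HPD/PIE/POE state and maximum output size. Also deals with
 * the MMU-off case and the various TxSZ/VA/output size consistency
 * checks, failing the walk with an Address Size or Translation fault
 * as appropriate.
 */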
138 static int setup_s1_walk(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
139 			 struct s1_walk_result *wr, u64 va)
140 {
141 	u64 hcr, sctlr, tcr, tg, ps, ia_bits, ttbr;
142 	unsigned int stride, x;
143 	bool va55, tbi, lva;
144 
145 	va55 = va & BIT(55);
146 
147 	if (vcpu_has_nv(vcpu)) {
148 		hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
149 		wi->s2 = wi->regime == TR_EL10 && (hcr & (HCR_VM | HCR_DC));
150 	} else {
151 		WARN_ON_ONCE(wi->regime != TR_EL10);
152 		wi->s2 = false;
153 		hcr = 0;
154 	}
155 
156 	switch (wi->regime) {
157 	case TR_EL10:
158 		sctlr	= vcpu_read_sys_reg(vcpu, SCTLR_EL1);
159 		tcr	= vcpu_read_sys_reg(vcpu, TCR_EL1);
160 		ttbr	= (va55 ?
161 			   vcpu_read_sys_reg(vcpu, TTBR1_EL1) :
162 			   vcpu_read_sys_reg(vcpu, TTBR0_EL1));
163 		break;
164 	case TR_EL2:
165 	case TR_EL20:
166 		sctlr	= vcpu_read_sys_reg(vcpu, SCTLR_EL2);
167 		tcr	= vcpu_read_sys_reg(vcpu, TCR_EL2);
168 		ttbr	= (va55 ?
169 			   vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
170 			   vcpu_read_sys_reg(vcpu, TTBR0_EL2));
171 		break;
172 	default:
173 		BUG();
174 	}
175 
176 	/* Someone was silly enough to encode TG0/TG1 differently */
177 	if (va55 && wi->regime != TR_EL2) {
178 		wi->txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);
179 		tg = FIELD_GET(TCR_TG1_MASK, tcr);
180 
181 		switch (tg << TCR_TG1_SHIFT) {
182 		case TCR_TG1_4K:
183 			wi->pgshift = 12;	 break;
184 		case TCR_TG1_16K:
185 			wi->pgshift = 14;	 break;
186 		case TCR_TG1_64K:
187 		default:	    /* IMPDEF: treat any other value as 64k */
188 			wi->pgshift = 16;	 break;
189 		}
190 	} else {
191 		wi->txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
192 		tg = FIELD_GET(TCR_TG0_MASK, tcr);
193 
194 		switch (tg << TCR_TG0_SHIFT) {
195 		case TCR_TG0_4K:
196 			wi->pgshift = 12;	 break;
197 		case TCR_TG0_16K:
198 			wi->pgshift = 14;	 break;
199 		case TCR_TG0_64K:
200 		default:	    /* IMPDEF: treat any other value as 64k */
201 			wi->pgshift = 16;	 break;
202 		}
203 	}
204 
205 	wi->pa52bit = has_52bit_pa(vcpu, wi, tcr);
206 
207 	ia_bits = get_ia_size(wi);
208 
209 	/* AArch64.S1StartLevel() */
210 	stride = wi->pgshift - 3;
211 	wi->sl = 3 - (((ia_bits - 1) - wi->pgshift) / stride);
212 
213 	if (wi->regime == TR_EL2 && va55)
214 		goto addrsz;
215 
216 	tbi = (wi->regime == TR_EL2 ?
217 	       FIELD_GET(TCR_EL2_TBI, tcr) :
218 	       (va55 ?
219 		FIELD_GET(TCR_TBI1, tcr) :
220 		FIELD_GET(TCR_TBI0, tcr)));
221 
222 	if (!tbi && (u64)sign_extend64(va, 55) != va)
223 		goto addrsz;
224 
225 	wi->sh = (wi->regime == TR_EL2 ?
226 		  FIELD_GET(TCR_EL2_SH0_MASK, tcr) :
227 		  (va55 ?
228 		   FIELD_GET(TCR_SH1_MASK, tcr) :
229 		   FIELD_GET(TCR_SH0_MASK, tcr)));
230 
231 	va = (u64)sign_extend64(va, 55);
232 
233 	/* Let's put the MMU disabled case aside immediately */
234 	switch (wi->regime) {
235 	case TR_EL10:
236 		/*
237 		 * If dealing with the EL1&0 translation regime, 3 things
238 		 * can disable the S1 translation:
239 		 *
240 		 * - HCR_EL2.DC = 1
241 		 * - HCR_EL2.{E2H,TGE} = {0,1}
242 		 * - SCTLR_EL1.M = 0
243 		 *
244 		 * The TGE part is interesting. If we have decided that this
245 		 * is EL1&0, then it means that either {E2H,TGE} == {1,0} or
246 		 * {0,x}, and we only need to test for TGE == 1.
247 		 */
248 		if (hcr & (HCR_DC | HCR_TGE)) {
249 			wr->level = S1_MMU_DISABLED;
250 			break;
251 		}
252 		fallthrough;
253 	case TR_EL2:
254 	case TR_EL20:
255 		if (!(sctlr & SCTLR_ELx_M))
256 			wr->level = S1_MMU_DISABLED;
257 		break;
258 	}
259 
260 	if (wr->level == S1_MMU_DISABLED) {
261 		if (va >= BIT(kvm_get_pa_bits(vcpu->kvm)))
262 			goto addrsz;
263 
264 		wr->pa = va;
265 		return 0;
266 	}
267 
268 	wi->be = sctlr & SCTLR_ELx_EE;
269 
270 	wi->hpd  = kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, HPDS, IMP);
271 	wi->hpd &= (wi->regime == TR_EL2 ?
272 		    FIELD_GET(TCR_EL2_HPD, tcr) :
273 		    (va55 ?
274 		     FIELD_GET(TCR_HPD1, tcr) :
275 		     FIELD_GET(TCR_HPD0, tcr)));
276 	/* R_JHSVW */
277 	wi->hpd |= s1pie_enabled(vcpu, wi->regime);
278 
279 	/* Do we have POE? */
280 	compute_s1poe(vcpu, wi);
281 
282 	/* R_BVXDG */
283 	wi->hpd |= (wi->poe || wi->e0poe);
284 
285 	/* R_PLCGL, R_YXNYW */
286 	if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR2_EL1, ST, 48_47)) {
287 		if (wi->txsz > 39)
288 			goto transfault;
289 	} else {
290 		if (wi->txsz > 48 || (BIT(wi->pgshift) == SZ_64K && wi->txsz > 47))
291 			goto transfault;
292 	}
293 
294 	/* R_GTJBY, R_SXWGM */
295 	switch (BIT(wi->pgshift)) {
296 	case SZ_4K:
297 	case SZ_16K:
298 		lva = wi->pa52bit;
299 		break;
300 	case SZ_64K:
301 		lva = kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, VARange, 52);
302 		break;
303 	}
304 
305 	if ((lva && wi->txsz < 12) || (!lva && wi->txsz < 16))
306 		goto transfault;
307 
308 	/* R_YYVYV, I_THCZK */
309 	if ((!va55 && va > GENMASK(ia_bits - 1, 0)) ||
310 	    (va55 && va < GENMASK(63, ia_bits)))
311 		goto transfault;
312 
313 	/* I_ZFSYQ */
314 	if (wi->regime != TR_EL2 &&
315 	    (tcr & (va55 ? TCR_EPD1_MASK : TCR_EPD0_MASK)))
316 		goto transfault;
317 
318 	/* R_BNDVG and following statements */
319 	if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR2_EL1, E0PD, IMP) &&
320 	    wi->as_el0 && (tcr & (va55 ? TCR_E0PD1 : TCR_E0PD0)))
321 		goto transfault;
322 
323 	ps = (wi->regime == TR_EL2 ?
324 	      FIELD_GET(TCR_EL2_PS_MASK, tcr) : FIELD_GET(TCR_IPS_MASK, tcr));
325 
326 	wi->max_oa_bits = min(get_kvm_ipa_limit(), ps_to_output_size(ps, wi->pa52bit));
327 
328 	/* Compute minimal alignment */
329 	x = 3 + ia_bits - ((3 - wi->sl) * stride + wi->pgshift);
330 
331 	wi->baddr = ttbr & TTBRx_EL1_BADDR;
332 	if (wi->pa52bit) {
333 		/*
334 		 * Force the alignment on 64 bytes for top-level tables
335 		 * smaller than 8 entries, since TTBR.BADDR[5:2] are used to
336 		 * store bits [51:48] of the first level of lookup.
337 		 */
338 		x = max(x, 6);
339 
340 		wi->baddr |= FIELD_GET(GENMASK_ULL(5, 2), ttbr) << 48;
341 	}
342 
343 	/* R_VPBBF */
344 	if (check_output_size(wi->baddr, wi))
345 		goto addrsz;
346 
347 	wi->baddr &= GENMASK_ULL(wi->max_oa_bits - 1, x);
348 
349 	wi->ha  = kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, HAFDBS, AF);
350 	wi->ha &= (wi->regime == TR_EL2 ?
351 		  FIELD_GET(TCR_EL2_HA, tcr) :
352 		  FIELD_GET(TCR_HA, tcr));
353 
354 	return 0;
355 
356 addrsz:
357 	/*
358 	 * Address Size Fault at level 0 to indicate it comes from the TTBR.
359 	 * Yes, this is an oddity.
360 	 */
361 	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(0), false);
362 	return -EFAULT;
363 
364 transfault:
365 	/* Translation Fault on start level */
366 	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(wi->sl), false);
367 	return -EFAULT;
368 }
369 
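/* Read one descriptor from guest memory, honouring SCTLR_ELx.EE */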
370 static int kvm_read_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 *desc,
371 			    struct s1_walk_info *wi)
372 {
373 	u64 val;
374 	int r;
375 
376 	r = kvm_read_guest(vcpu->kvm, pa, &val, sizeof(val));
377 	if (r)
378 		return r;
379 
380 	if (wi->be)
381 		*desc = be64_to_cpu((__force __be64)val);
382 	else
383 		*desc = le64_to_cpu((__force __le64)val);
384 
385 	return 0;
386 }
387 
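/* Convert old/new to the guest's endianness and do the atomic update */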
388 static int kvm_swap_s1_desc(struct kvm_vcpu *vcpu, u64 pa, u64 old, u64 new,
389 			    struct s1_walk_info *wi)
390 {
391 	if (wi->be) {
392 		old = (__force u64)cpu_to_be64(old);
393 		new = (__force u64)cpu_to_be64(new);
394 	} else {
395 		old = (__force u64)cpu_to_le64(old);
396 		new = (__force u64)cpu_to_le64(new);
397 	}
398 
399 	return __kvm_at_swap_desc(vcpu->kvm, pa, old, new);
400 }
401 
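/*
 * Perform the stage-1 table walk, one level at a time, optionally
 * translating each table address through stage-2 and applying the
 * caller-provided filter. On success, the final descriptor, level and
 * output address are recorded in the walk result, and the Access flag
 * is set when hardware AF updates are enabled.
 */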
402 static int walk_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
403 		   struct s1_walk_result *wr, u64 va)
404 {
405 	u64 va_top, va_bottom, baddr, desc, new_desc, ipa;
406 	struct kvm_s2_trans s2_trans = {};
407 	int level, stride, ret;
408 
409 	level = wi->sl;
410 	stride = wi->pgshift - 3;
411 	baddr = wi->baddr;
412 
413 	va_top = get_ia_size(wi) - 1;
414 
415 	while (1) {
416 		u64 index;
417 
418 		va_bottom = (3 - level) * stride + wi->pgshift;
419 		index = (va & GENMASK_ULL(va_top, va_bottom)) >> (va_bottom - 3);
420 
421 		ipa = baddr | index;
422 
423 		if (wi->s2) {
424 			ret = kvm_walk_nested_s2(vcpu, ipa, &s2_trans);
425 			if (ret) {
426 				fail_s1_walk(wr,
427 					     (s2_trans.esr & ~ESR_ELx_FSC_LEVEL) | level,
428 					     true);
429 				return ret;
430 			}
431 
432 			if (!kvm_s2_trans_readable(&s2_trans)) {
433 				fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level),
434 					     true);
435 
436 				return -EPERM;
437 			}
438 
439 			ipa = kvm_s2_trans_output(&s2_trans);
440 		}
441 
442 		if (wi->filter) {
443 			ret = wi->filter->fn(&(struct s1_walk_context)
444 					     {
445 						     .wi	= wi,
446 						     .table_ipa	= baddr,
447 						     .level	= level,
448 					     }, wi->filter->priv);
449 			if (ret)
450 				return ret;
451 		}
452 
453 		ret = kvm_read_s1_desc(vcpu, ipa, &desc, wi);
454 		if (ret) {
455 			fail_s1_walk(wr, ESR_ELx_FSC_SEA_TTW(level), false);
456 			return ret;
457 		}
458 
459 		new_desc = desc;
460 
461 		/* Invalid descriptor */
462 		if (!(desc & BIT(0)))
463 			goto transfault;
464 
465 		/* Block mapping, check validity down the line */
466 		if (!(desc & BIT(1)))
467 			break;
468 
469 		/* Page mapping */
470 		if (level == 3)
471 			break;
472 
473 		/* Table handling */
474 		if (!wi->hpd) {
475 			wr->APTable  |= FIELD_GET(S1_TABLE_AP, desc);
476 			wr->UXNTable |= FIELD_GET(PMD_TABLE_UXN, desc);
477 			wr->PXNTable |= FIELD_GET(PMD_TABLE_PXN, desc);
478 		}
479 
480 		baddr = desc_to_oa(wi, desc);
481 
482 		/* Check for out-of-range OA */
483 		if (check_output_size(baddr, wi))
484 			goto addrsz;
485 
486 		/* Prepare for next round */
487 		va_top = va_bottom - 1;
488 		level++;
489 	}
490 
491 	/* Block mapping, check the validity of the level */
492 	if (!(desc & BIT(1))) {
493 		bool valid_block = false;
494 
495 		switch (BIT(wi->pgshift)) {
496 		case SZ_4K:
497 			valid_block = level == 1 || level == 2 || (wi->pa52bit && level == 0);
498 			break;
499 		case SZ_16K:
500 		case SZ_64K:
501 			valid_block = level == 2 || (wi->pa52bit && level == 1);
502 			break;
503 		}
504 
505 		if (!valid_block)
506 			goto transfault;
507 	}
508 
509 	baddr = desc_to_oa(wi, desc);
510 	if (check_output_size(baddr & GENMASK(52, va_bottom), wi))
511 		goto addrsz;
512 
513 	if (wi->ha)
514 		new_desc |= PTE_AF;
515 
516 	if (new_desc != desc) {
517 		if (wi->s2 && !kvm_s2_trans_writable(&s2_trans)) {
518 			fail_s1_walk(wr, ESR_ELx_FSC_PERM_L(level), true);
519 			return -EPERM;
520 		}
521 
522 		ret = kvm_swap_s1_desc(vcpu, ipa, desc, new_desc, wi);
523 		if (ret)
524 			return ret;
525 
526 		desc = new_desc;
527 	}
528 
529 	if (!(desc & PTE_AF)) {
530 		fail_s1_walk(wr, ESR_ELx_FSC_ACCESS_L(level), false);
531 		return -EACCES;
532 	}
533 
534 	va_bottom += contiguous_bit_shift(desc, wi, level);
535 
536 	wr->failed = false;
537 	wr->level = level;
538 	wr->desc = desc;
539 	wr->pa = baddr & GENMASK(52, va_bottom);
540 	wr->pa |= va & GENMASK_ULL(va_bottom - 1, 0);
541 
542 	wr->nG = (wi->regime != TR_EL2) && (desc & PTE_NG);
543 	if (wr->nG)
544 		wr->asid = get_asid_by_regime(vcpu, wi->regime);
545 
546 	return 0;
547 
548 addrsz:
549 	fail_s1_walk(wr, ESR_ELx_FSC_ADDRSZ_L(level), false);
550 	return -EINVAL;
551 transfault:
552 	fail_s1_walk(wr, ESR_ELx_FSC_FAULT_L(level), false);
553 	return -ENOENT;
554 }
555 
556 struct mmu_config {
557 	u64	ttbr0;
558 	u64	ttbr1;
559 	u64	tcr;
560 	u64	mair;
561 	u64	tcr2;
562 	u64	pir;
563 	u64	pire0;
564 	u64	por_el0;
565 	u64	por_el1;
566 	u64	sctlr;
567 	u64	vttbr;
568 	u64	vtcr;
569 };
570 
571 static void __mmu_config_save(struct mmu_config *config)
572 {
573 	config->ttbr0	= read_sysreg_el1(SYS_TTBR0);
574 	config->ttbr1	= read_sysreg_el1(SYS_TTBR1);
575 	config->tcr	= read_sysreg_el1(SYS_TCR);
576 	config->mair	= read_sysreg_el1(SYS_MAIR);
577 	if (cpus_have_final_cap(ARM64_HAS_TCR2)) {
578 		config->tcr2	= read_sysreg_el1(SYS_TCR2);
579 		if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
580 			config->pir	= read_sysreg_el1(SYS_PIR);
581 			config->pire0	= read_sysreg_el1(SYS_PIRE0);
582 		}
583 		if (system_supports_poe()) {
584 			config->por_el1	= read_sysreg_el1(SYS_POR);
585 			config->por_el0	= read_sysreg_s(SYS_POR_EL0);
586 		}
587 	}
588 	config->sctlr	= read_sysreg_el1(SYS_SCTLR);
589 	config->vttbr	= read_sysreg(vttbr_el2);
590 	config->vtcr	= read_sysreg(vtcr_el2);
591 }
592 
593 static void __mmu_config_restore(struct mmu_config *config)
594 {
595 	/*
596 	 * ARM errata 1165522 and 1530923 require TGE to be 1 before
597 	 * we update the guest state.
598 	 */
599 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
600 
601 	write_sysreg_el1(config->ttbr0,	SYS_TTBR0);
602 	write_sysreg_el1(config->ttbr1,	SYS_TTBR1);
603 	write_sysreg_el1(config->tcr,	SYS_TCR);
604 	write_sysreg_el1(config->mair,	SYS_MAIR);
605 	if (cpus_have_final_cap(ARM64_HAS_TCR2)) {
606 		write_sysreg_el1(config->tcr2, SYS_TCR2);
607 		if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
608 			write_sysreg_el1(config->pir, SYS_PIR);
609 			write_sysreg_el1(config->pire0, SYS_PIRE0);
610 		}
611 		if (system_supports_poe()) {
612 			write_sysreg_el1(config->por_el1, SYS_POR);
613 			write_sysreg_s(config->por_el0, SYS_POR_EL0);
614 		}
615 	}
616 	write_sysreg_el1(config->sctlr,	SYS_SCTLR);
617 	write_sysreg(config->vttbr,	vttbr_el2);
618 	write_sysreg(config->vtcr,	vtcr_el2);
619 }
620 
621 static bool at_s1e1p_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
622 {
623 	u64 host_pan;
624 	bool fail;
625 
626 	host_pan = read_sysreg_s(SYS_PSTATE_PAN);
627 	write_sysreg_s(*vcpu_cpsr(vcpu) & PSTATE_PAN, SYS_PSTATE_PAN);
628 
629 	switch (op) {
630 	case OP_AT_S1E1RP:
631 		fail = __kvm_at(OP_AT_S1E1RP, vaddr);
632 		break;
633 	case OP_AT_S1E1WP:
634 		fail = __kvm_at(OP_AT_S1E1WP, vaddr);
635 		break;
636 	}
637 
638 	write_sysreg_s(host_pan, SYS_PSTATE_PAN);
639 
640 	return fail;
641 }
642 
643 #define MEMATTR(ic, oc)		(MEMATTR_##oc << 4 | MEMATTR_##ic)
644 #define MEMATTR_NC		0b0100
645 #define MEMATTR_Wt		0b1000
646 #define MEMATTR_Wb		0b1100
647 #define MEMATTR_WbRaWa		0b1111
648 
649 #define MEMATTR_IS_DEVICE(m)	(((m) & GENMASK(7, 4)) == 0)
650 
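/*
 * Convert a stage-2 MemAttr[3:0] field into a MAIR-style attribute
 * byte, with the outer attribute in the top nibble and the inner one
 * in the bottom nibble (e.g. MEMATTR(NC, Wb) == 0b1100_0100).
 */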
651 static u8 s2_memattr_to_attr(u8 memattr)
652 {
653 	memattr &= 0b1111;
654 
655 	switch (memattr) {
656 	case 0b0000:
657 	case 0b0001:
658 	case 0b0010:
659 	case 0b0011:
660 		return memattr << 2;
661 	case 0b0100:
662 		return MEMATTR(Wb, Wb);
663 	case 0b0101:
664 		return MEMATTR(NC, NC);
665 	case 0b0110:
666 		return MEMATTR(Wt, NC);
667 	case 0b0111:
668 		return MEMATTR(Wb, NC);
669 	case 0b1000:
670 		/* Reserved, assume NC */
671 		return MEMATTR(NC, NC);
672 	case 0b1001:
673 		return MEMATTR(NC, Wt);
674 	case 0b1010:
675 		return MEMATTR(Wt, Wt);
676 	case 0b1011:
677 		return MEMATTR(Wb, Wt);
678 	case 0b1100:
679 		/* Reserved, assume NC */
680 		return MEMATTR(NC, NC);
681 	case 0b1101:
682 		return MEMATTR(NC, Wb);
683 	case 0b1110:
684 		return MEMATTR(Wt, Wb);
685 	case 0b1111:
686 		return MEMATTR(Wb, Wb);
687 	default:
688 		unreachable();
689 	}
690 }
691 
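/*
 * Combine the S1 and S2 Normal memory attributes for a single nibble,
 * as per S2CombineS1AttrHints(): the weakest cacheability wins, and
 * the allocation hints are inherited from stage-1.
 */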
692 static u8 combine_s1_s2_attr(u8 s1, u8 s2)
693 {
694 	bool transient;
695 	u8 final = 0;
696 
697 	/* Upgrade transient s1 to non-transient to simplify things */
698 	switch (s1) {
699 	case 0b0001 ... 0b0011:	/* Normal, Write-Through Transient */
700 		transient = true;
701 		s1 = MEMATTR_Wt | (s1 & GENMASK(1,0));
702 		break;
703 	case 0b0101 ... 0b0111:	/* Normal, Write-Back Transient */
704 		transient = true;
705 		s1 = MEMATTR_Wb | (s1 & GENMASK(1,0));
706 		break;
707 	default:
708 		transient = false;
709 	}
710 
711 	/* S2CombineS1AttrHints() */
712 	if ((s1 & GENMASK(3, 2)) == MEMATTR_NC ||
713 	    (s2 & GENMASK(3, 2)) == MEMATTR_NC)
714 		final = MEMATTR_NC;
715 	else if ((s1 & GENMASK(3, 2)) == MEMATTR_Wt ||
716 		 (s2 & GENMASK(3, 2)) == MEMATTR_Wt)
717 		final = MEMATTR_Wt;
718 	else
719 		final = MEMATTR_Wb;
720 
721 	if (final != MEMATTR_NC) {
722 		/* Inherit RaWa hints from S1 */
723 		if (transient) {
724 			switch (s1 & GENMASK(3, 2)) {
725 			case MEMATTR_Wt:
726 				final = 0;
727 				break;
728 			case MEMATTR_Wb:
729 				final = MEMATTR_NC;
730 				break;
731 			}
732 		}
733 
734 		final |= s1 & GENMASK(1, 0);
735 	}
736 
737 	return final;
738 }
739 
740 #define ATTR_NSH	0b00
741 #define ATTR_RSV	0b01
742 #define ATTR_OSH	0b10
743 #define ATTR_ISH	0b11
744 
745 static u8 compute_final_sh(u8 attr, u8 sh)
746 {
747 	/* Any form of Device memory, as well as NC, has SH[1:0]=0b10 */
748 	if (MEMATTR_IS_DEVICE(attr) || attr == MEMATTR(NC, NC))
749 		return ATTR_OSH;
750 
751 	if (sh == ATTR_RSV)		/* Reserved, mapped to NSH */
752 		sh = ATTR_NSH;
753 
754 	return sh;
755 }
756 
757 static u8 compute_s1_sh(struct s1_walk_info *wi, struct s1_walk_result *wr,
758 			u8 attr)
759 {
760 	u8 sh;
761 
762 	/*
763 	 * non-52bit and LPA have their basic shareability described in the
764 	 * descriptor. LPA2 gets it from the corresponding field in TCR,
765 	 * conveniently recorded in the walk info.
766 	 */
767 	if (!wi->pa52bit || BIT(wi->pgshift) == SZ_64K)
768 		sh = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_SH, wr->desc);
769 	else
770 		sh = wi->sh;
771 
772 	return compute_final_sh(attr, sh);
773 }
774 
775 static u8 combine_sh(u8 s1_sh, u8 s2_sh)
776 {
777 	if (s1_sh == ATTR_OSH || s2_sh == ATTR_OSH)
778 		return ATTR_OSH;
779 	if (s1_sh == ATTR_ISH || s2_sh == ATTR_ISH)
780 		return ATTR_ISH;
781 
782 	return ATTR_NSH;
783 }
784 
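/*
 * Combine the stage-1 PAR with the result of the stage-2 walk to form
 * the final PAR_EL1 value for an AT S12E* operation, including the
 * FWB and HCR_EL2.CD handling of memory attributes.
 */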
785 static u64 compute_par_s12(struct kvm_vcpu *vcpu, u64 s1_par,
786 			   struct kvm_s2_trans *tr)
787 {
788 	u8 s1_parattr, s2_memattr, final_attr, s2_sh;
789 	u64 par;
790 
791 	/* If S2 has failed to translate, report the damage */
792 	if (tr->esr) {
793 		par = SYS_PAR_EL1_RES1;
794 		par |= SYS_PAR_EL1_F;
795 		par |= SYS_PAR_EL1_S;
796 		par |= FIELD_PREP(SYS_PAR_EL1_FST, tr->esr);
797 		return par;
798 	}
799 
800 	s1_parattr = FIELD_GET(SYS_PAR_EL1_ATTR, s1_par);
801 	s2_memattr = FIELD_GET(GENMASK(5, 2), tr->desc);
802 
803 	if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_FWB) {
804 		if (!kvm_has_feat(vcpu->kvm, ID_AA64PFR2_EL1, MTEPERM, IMP))
805 			s2_memattr &= ~BIT(3);
806 
807 		/* Combination of R_VRJSW and R_RHWZM */
808 		switch (s2_memattr) {
809 		case 0b0101:
810 			if (MEMATTR_IS_DEVICE(s1_parattr))
811 				final_attr = s1_parattr;
812 			else
813 				final_attr = MEMATTR(NC, NC);
814 			break;
815 		case 0b0110:
816 		case 0b1110:
817 			final_attr = MEMATTR(WbRaWa, WbRaWa);
818 			break;
819 		case 0b0111:
820 		case 0b1111:
821 			/* Preserve S1 attribute */
822 			final_attr = s1_parattr;
823 			break;
824 		case 0b0100:
825 		case 0b1100:
826 		case 0b1101:
827 			/* Reserved, do something non-silly */
828 			final_attr = s1_parattr;
829 			break;
830 		default:
831 			/*
832 			 * MemAttr[2]=0, Device from S2.
833 			 *
834 			 * FWB does not influence the way that stage 1
835 			 * memory types and attributes are combined
836 			 * with stage 2 Device type and attributes.
837 			 */
838 			final_attr = min(s2_memattr_to_attr(s2_memattr),
839 					 s1_parattr);
840 		}
841 	} else {
842 		/* Combination of R_HMNDG, R_TNHFM and R_GQFSF */
843 		u8 s2_parattr = s2_memattr_to_attr(s2_memattr);
844 
845 		if (MEMATTR_IS_DEVICE(s1_parattr) ||
846 		    MEMATTR_IS_DEVICE(s2_parattr)) {
847 			final_attr = min(s1_parattr, s2_parattr);
848 		} else {
849 			/* At this stage, this is memory vs memory */
850 			final_attr  = combine_s1_s2_attr(s1_parattr & 0xf,
851 							 s2_parattr & 0xf);
852 			final_attr |= combine_s1_s2_attr(s1_parattr >> 4,
853 							 s2_parattr >> 4) << 4;
854 		}
855 	}
856 
857 	if ((__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_CD) &&
858 	    !MEMATTR_IS_DEVICE(final_attr))
859 		final_attr = MEMATTR(NC, NC);
860 
861 	s2_sh = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S2_SH, tr->desc);
862 
863 	par  = FIELD_PREP(SYS_PAR_EL1_ATTR, final_attr);
864 	par |= tr->output & GENMASK(47, 12);
865 	par |= FIELD_PREP(SYS_PAR_EL1_SH,
866 			  combine_sh(FIELD_GET(SYS_PAR_EL1_SH, s1_par),
867 				     compute_final_sh(final_attr, s2_sh)));
868 
869 	return par;
870 }
871 
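/*
 * Build the PAR_EL1 value for a stage-1 only translation, either
 * reporting the fault recorded in the walk result or deriving the
 * output attributes from MAIR/SCTLR for the relevant regime.
 */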
872 static u64 compute_par_s1(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
873 			  struct s1_walk_result *wr)
874 {
875 	u64 par;
876 
877 	if (wr->failed) {
878 		par = SYS_PAR_EL1_RES1;
879 		par |= SYS_PAR_EL1_F;
880 		par |= FIELD_PREP(SYS_PAR_EL1_FST, wr->fst);
881 		par |= wr->ptw ? SYS_PAR_EL1_PTW : 0;
882 		par |= wr->s2 ? SYS_PAR_EL1_S : 0;
883 	} else if (wr->level == S1_MMU_DISABLED) {
884 		/* MMU off or HCR_EL2.DC == 1 */
885 		par  = SYS_PAR_EL1_NSE;
886 		par |= wr->pa & SYS_PAR_EL1_PA;
887 
888 		if (wi->regime == TR_EL10 && vcpu_has_nv(vcpu) &&
889 		    (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_DC)) {
890 			par |= FIELD_PREP(SYS_PAR_EL1_ATTR,
891 					  MEMATTR(WbRaWa, WbRaWa));
892 			par |= FIELD_PREP(SYS_PAR_EL1_SH, ATTR_NSH);
893 		} else {
894 			par |= FIELD_PREP(SYS_PAR_EL1_ATTR, 0); /* nGnRnE */
895 			par |= FIELD_PREP(SYS_PAR_EL1_SH, ATTR_OSH);
896 		}
897 	} else {
898 		u64 mair, sctlr;
899 		u8 sh;
900 
901 		par  = SYS_PAR_EL1_NSE;
902 
903 		mair = (wi->regime == TR_EL10 ?
904 			vcpu_read_sys_reg(vcpu, MAIR_EL1) :
905 			vcpu_read_sys_reg(vcpu, MAIR_EL2));
906 
907 		mair >>= FIELD_GET(PTE_ATTRINDX_MASK, wr->desc) * 8;
908 		mair &= 0xff;
909 
910 		sctlr = (wi->regime == TR_EL10 ?
911 			 vcpu_read_sys_reg(vcpu, SCTLR_EL1) :
912 			 vcpu_read_sys_reg(vcpu, SCTLR_EL2));
913 
914 		/* Force NC for memory if SCTLR_ELx.C is clear */
915 		if (!(sctlr & SCTLR_EL1_C) && !MEMATTR_IS_DEVICE(mair))
916 			mair = MEMATTR(NC, NC);
917 
918 		par |= FIELD_PREP(SYS_PAR_EL1_ATTR, mair);
919 		par |= wr->pa & SYS_PAR_EL1_PA;
920 
921 		sh = compute_s1_sh(wi, wr, mair);
922 		par |= FIELD_PREP(SYS_PAR_EL1_SH, sh);
923 	}
924 
925 	return par;
926 }
927 
928 static bool pan3_enabled(struct kvm_vcpu *vcpu, enum trans_regime regime)
929 {
930 	u64 sctlr;
931 
932 	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, PAN, PAN3))
933 		return false;
934 
935 	if (s1pie_enabled(vcpu, regime))
936 		return true;
937 
938 	if (regime == TR_EL10)
939 		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
940 	else
941 		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
942 
943 	return sctlr & SCTLR_EL1_EPAN;
944 }
945 
946 static void compute_s1_direct_permissions(struct kvm_vcpu *vcpu,
947 					  struct s1_walk_info *wi,
948 					  struct s1_walk_result *wr)
949 {
950 	bool wxn;
951 
952 	/* Non-hierarchical part of AArch64.S1DirectBasePermissions() */
953 	if (wi->regime != TR_EL2) {
954 		switch (FIELD_GET(PTE_USER | PTE_RDONLY, wr->desc)) {
955 		case 0b00:
956 			wr->pr = wr->pw = true;
957 			wr->ur = wr->uw = false;
958 			break;
959 		case 0b01:
960 			wr->pr = wr->pw = wr->ur = wr->uw = true;
961 			break;
962 		case 0b10:
963 			wr->pr = true;
964 			wr->pw = wr->ur = wr->uw = false;
965 			break;
966 		case 0b11:
967 			wr->pr = wr->ur = true;
968 			wr->pw = wr->uw = false;
969 			break;
970 		}
971 
972 		/* We don't use px for anything yet, but hey... */
973 		wr->px = !((wr->desc & PTE_PXN) || wr->uw);
974 		wr->ux = !(wr->desc & PTE_UXN);
975 	} else {
976 		wr->ur = wr->uw = wr->ux = false;
977 
978 		if (!(wr->desc & PTE_RDONLY)) {
979 			wr->pr = wr->pw = true;
980 		} else {
981 			wr->pr = true;
982 			wr->pw = false;
983 		}
984 
985 		/* XN maps to UXN */
986 		wr->px = !(wr->desc & PTE_UXN);
987 	}
988 
989 	switch (wi->regime) {
990 	case TR_EL2:
991 	case TR_EL20:
992 		wxn = (vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_WXN);
993 		break;
994 	case TR_EL10:
995 		wxn = (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_WXN);
996 		break;
997 	}
998 
999 	wr->pwxn = wr->uwxn = wxn;
1000 	wr->pov = wi->poe;
1001 	wr->uov = wi->e0poe;
1002 }
1003 
1004 static void compute_s1_hierarchical_permissions(struct kvm_vcpu *vcpu,
1005 						struct s1_walk_info *wi,
1006 						struct s1_walk_result *wr)
1007 {
1008 	/* Hierarchical part of AArch64.S1DirectBasePermissions() */
1009 	if (wi->regime != TR_EL2) {
1010 		switch (wr->APTable) {
1011 		case 0b00:
1012 			break;
1013 		case 0b01:
1014 			wr->ur = wr->uw = false;
1015 			break;
1016 		case 0b10:
1017 			wr->pw = wr->uw = false;
1018 			break;
1019 		case 0b11:
1020 			wr->pw = wr->ur = wr->uw = false;
1021 			break;
1022 		}
1023 
1024 		wr->px &= !wr->PXNTable;
1025 		wr->ux &= !wr->UXNTable;
1026 	} else {
1027 		if (wr->APTable & BIT(1))
1028 			wr->pw = false;
1029 
1030 		/* XN maps to UXN */
1031 		wr->px &= !wr->UXNTable;
1032 	}
1033 }
1034 
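/*
 * Extract the 4-bit permission entry at index i from a PIR/POR-style
 * register (e.g. perm_idx(vcpu, PIR_EL1, 3) returns PIR_EL1[15:12]).
 */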
1035 #define perm_idx(v, r, i)	((vcpu_read_sys_reg((v), (r)) >> ((i) * 4)) & 0xf)
1036 
1037 #define set_priv_perms(wr, r, w, x)	\
1038 	do {				\
1039 		(wr)->pr = (r);		\
1040 		(wr)->pw = (w);		\
1041 		(wr)->px = (x);		\
1042 	} while (0)
1043 
1044 #define set_unpriv_perms(wr, r, w, x)	\
1045 	do {				\
1046 		(wr)->ur = (r);		\
1047 		(wr)->uw = (w);		\
1048 		(wr)->ux = (x);		\
1049 	} while (0)
1050 
1051 #define set_priv_wxn(wr, v)		\
1052 	do {				\
1053 		(wr)->pwxn = (v);	\
1054 	} while (0)
1055 
1056 #define set_unpriv_wxn(wr, v)		\
1057 	do {				\
1058 		(wr)->uwxn = (v);	\
1059 	} while (0)
1060 
1061 /* Similar to AArch64.S1IndirectBasePermissions(), without GCS  */
1062 #define set_perms(w, wr, ip)						\
1063 	do {								\
1064 		/* R_LLZDZ */						\
1065 		switch ((ip)) {						\
1066 		case 0b0000:						\
1067 			set_ ## w ## _perms((wr), false, false, false);	\
1068 			break;						\
1069 		case 0b0001:						\
1070 			set_ ## w ## _perms((wr), true , false, false);	\
1071 			break;						\
1072 		case 0b0010:						\
1073 			set_ ## w ## _perms((wr), false, false, true );	\
1074 			break;						\
1075 		case 0b0011:						\
1076 			set_ ## w ## _perms((wr), true , false, true );	\
1077 			break;						\
1078 		case 0b0100:						\
1079 			set_ ## w ## _perms((wr), false, false, false);	\
1080 			break;						\
1081 		case 0b0101:						\
1082 			set_ ## w ## _perms((wr), true , true , false);	\
1083 			break;						\
1084 		case 0b0110:						\
1085 			set_ ## w ## _perms((wr), true , true , true );	\
1086 			break;						\
1087 		case 0b0111:						\
1088 			set_ ## w ## _perms((wr), true , true , true );	\
1089 			break;						\
1090 		case 0b1000:						\
1091 			set_ ## w ## _perms((wr), true , false, false);	\
1092 			break;						\
1093 		case 0b1001:						\
1094 			set_ ## w ## _perms((wr), true , false, false);	\
1095 			break;						\
1096 		case 0b1010:						\
1097 			set_ ## w ## _perms((wr), true , false, true );	\
1098 			break;						\
1099 		case 0b1011:						\
1100 			set_ ## w ## _perms((wr), false, false, false);	\
1101 			break;						\
1102 		case 0b1100:						\
1103 			set_ ## w ## _perms((wr), true , true , false);	\
1104 			break;						\
1105 		case 0b1101:						\
1106 			set_ ## w ## _perms((wr), false, false, false);	\
1107 			break;						\
1108 		case 0b1110:						\
1109 			set_ ## w ## _perms((wr), true , true , true );	\
1110 			break;						\
1111 		case 0b1111:						\
1112 			set_ ## w ## _perms((wr), false, false, false);	\
1113 			break;						\
1114 		}							\
1115 									\
1116 		/* R_HJYGR */						\
1117 		set_ ## w ## _wxn((wr), ((ip) == 0b0110));		\
1118 									\
1119 	} while (0)
1120 
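/*
 * Resolve the S1PIE indirect permissions: look up the PIR/PIRE0 entry
 * selected by the descriptor's permission index and convert it into
 * the direct {pr,pw,px,ur,uw,ux} representation.
 */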
1121 static void compute_s1_indirect_permissions(struct kvm_vcpu *vcpu,
1122 					    struct s1_walk_info *wi,
1123 					    struct s1_walk_result *wr)
1124 {
1125 	u8 up, pp, idx;
1126 
1127 	idx = pte_pi_index(wr->desc);
1128 
1129 	switch (wi->regime) {
1130 	case TR_EL10:
1131 		pp = perm_idx(vcpu, PIR_EL1, idx);
1132 		up = perm_idx(vcpu, PIRE0_EL1, idx);
1133 		break;
1134 	case TR_EL20:
1135 		pp = perm_idx(vcpu, PIR_EL2, idx);
1136 		up = perm_idx(vcpu, PIRE0_EL2, idx);
1137 		break;
1138 	case TR_EL2:
1139 		pp = perm_idx(vcpu, PIR_EL2, idx);
1140 		up = 0;
1141 		break;
1142 	}
1143 
1144 	set_perms(priv, wr, pp);
1145 
1146 	if (wi->regime != TR_EL2)
1147 		set_perms(unpriv, wr, up);
1148 	else
1149 		set_unpriv_perms(wr, false, false, false);
1150 
1151 	wr->pov = wi->poe && !(pp & BIT(3));
1152 	wr->uov = wi->e0poe && !(up & BIT(3));
1153 
1154 	/* R_VFPJF */
1155 	if (wr->px && wr->uw) {
1156 		set_priv_perms(wr, false, false, false);
1157 		set_unpriv_perms(wr, false, false, false);
1158 	}
1159 }
1160 
1161 static void compute_s1_overlay_permissions(struct kvm_vcpu *vcpu,
1162 					   struct s1_walk_info *wi,
1163 					   struct s1_walk_result *wr)
1164 {
1165 	u8 idx, pov_perms, uov_perms;
1166 
1167 	idx = FIELD_GET(PTE_PO_IDX_MASK, wr->desc);
1168 
1169 	if (wr->pov) {
1170 		switch (wi->regime) {
1171 		case TR_EL10:
1172 			pov_perms = perm_idx(vcpu, POR_EL1, idx);
1173 			break;
1174 		case TR_EL20:
1175 			pov_perms = perm_idx(vcpu, POR_EL2, idx);
1176 			break;
1177 		case TR_EL2:
1178 			pov_perms = perm_idx(vcpu, POR_EL2, idx);
1179 			break;
1180 		}
1181 
1182 		if (pov_perms & ~POE_RWX)
1183 			pov_perms = POE_NONE;
1184 
1185 		/* R_QXXPC, S1PrivOverlay enabled */
1186 		if (wr->pwxn && (pov_perms & POE_X))
1187 			pov_perms &= ~POE_W;
1188 
1189 		wr->pr &= pov_perms & POE_R;
1190 		wr->pw &= pov_perms & POE_W;
1191 		wr->px &= pov_perms & POE_X;
1192 	}
1193 
1194 	if (wr->uov) {
1195 		switch (wi->regime) {
1196 		case TR_EL10:
1197 			uov_perms = perm_idx(vcpu, POR_EL0, idx);
1198 			break;
1199 		case TR_EL20:
1200 			uov_perms = perm_idx(vcpu, POR_EL0, idx);
1201 			break;
1202 		case TR_EL2:
1203 			uov_perms = 0;
1204 			break;
1205 		}
1206 
1207 		if (uov_perms & ~POE_RWX)
1208 			uov_perms = POE_NONE;
1209 
1210 		/* R_NPBXC, S1UnprivOverlay enabled */
1211 		if (wr->uwxn && (uov_perms & POE_X))
1212 			uov_perms &= ~POE_W;
1213 
1214 		wr->ur &= uov_perms & POE_R;
1215 		wr->uw &= uov_perms & POE_W;
1216 		wr->ux &= uov_perms & POE_X;
1217 	}
1218 }
1219 
1220 static void compute_s1_permissions(struct kvm_vcpu *vcpu,
1221 				   struct s1_walk_info *wi,
1222 				   struct s1_walk_result *wr)
1223 {
1224 	bool pan;
1225 
1226 	if (!s1pie_enabled(vcpu, wi->regime))
1227 		compute_s1_direct_permissions(vcpu, wi, wr);
1228 	else
1229 		compute_s1_indirect_permissions(vcpu, wi, wr);
1230 
1231 	if (!wi->hpd)
1232 		compute_s1_hierarchical_permissions(vcpu, wi, wr);
1233 
1234 	compute_s1_overlay_permissions(vcpu, wi, wr);
1235 
1236 	/* R_QXXPC, S1PrivOverlay disabled */
1237 	if (!wr->pov)
1238 		wr->px &= !(wr->pwxn && wr->pw);
1239 
1240 	/* R_NPBXC, S1UnprivOverlay disabled */
1241 	if (!wr->uov)
1242 		wr->ux &= !(wr->uwxn && wr->uw);
1243 
1244 	pan = wi->pan && (wr->ur || wr->uw ||
1245 			  (pan3_enabled(vcpu, wi->regime) && wr->ux));
1246 	wr->pw &= !pan;
1247 	wr->pr &= !pan;
1248 }
1249 
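/*
 * Emulate an AT instruction entirely in software: walk the guest's
 * stage-1 tables, compute the resulting permissions and fold the
 * outcome into a PAR_EL1 value.
 */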
1250 static int handle_at_slow(struct kvm_vcpu *vcpu, u32 op, u64 vaddr, u64 *par)
1251 {
1252 	struct s1_walk_result wr = {};
1253 	struct s1_walk_info wi = {};
1254 	bool perm_fail = false;
1255 	int ret, idx;
1256 
1257 	wi.regime = compute_translation_regime(vcpu, op);
1258 	wi.as_el0 = (op == OP_AT_S1E0R || op == OP_AT_S1E0W);
1259 	wi.pan = (op == OP_AT_S1E1RP || op == OP_AT_S1E1WP) &&
1260 		 (*vcpu_cpsr(vcpu) & PSR_PAN_BIT);
1261 
1262 	ret = setup_s1_walk(vcpu, &wi, &wr, vaddr);
1263 	if (ret)
1264 		goto compute_par;
1265 
1266 	if (wr.level == S1_MMU_DISABLED)
1267 		goto compute_par;
1268 
1269 	idx = srcu_read_lock(&vcpu->kvm->srcu);
1270 
1271 	ret = walk_s1(vcpu, &wi, &wr, vaddr);
1272 
1273 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
1274 
1275 	/*
1276 	 * Race to update a descriptor -- restart the walk.
1277 	 */
1278 	if (ret == -EAGAIN)
1279 		return ret;
1280 	if (ret)
1281 		goto compute_par;
1282 
1283 	compute_s1_permissions(vcpu, &wi, &wr);
1284 
1285 	switch (op) {
1286 	case OP_AT_S1E1RP:
1287 	case OP_AT_S1E1R:
1288 	case OP_AT_S1E2R:
1289 		perm_fail = !wr.pr;
1290 		break;
1291 	case OP_AT_S1E1WP:
1292 	case OP_AT_S1E1W:
1293 	case OP_AT_S1E2W:
1294 		perm_fail = !wr.pw;
1295 		break;
1296 	case OP_AT_S1E0R:
1297 		perm_fail = !wr.ur;
1298 		break;
1299 	case OP_AT_S1E0W:
1300 		perm_fail = !wr.uw;
1301 		break;
1302 	case OP_AT_S1E1A:
1303 	case OP_AT_S1E2A:
1304 		break;
1305 	default:
1306 		BUG();
1307 	}
1308 
1309 	if (perm_fail)
1310 		fail_s1_walk(&wr, ESR_ELx_FSC_PERM_L(wr.level), false);
1311 
1312 compute_par:
1313 	*par = compute_par_s1(vcpu, &wi, &wr);
1314 	return 0;
1315 }
1316 
1317 /*
1318  * Return the PAR_EL1 value as the result of a valid translation.
1319  *
1320  * If the translation is unsuccessful, the value may only contain
1321  * PAR_EL1.F, and cannot be taken at face value. It isn't an
1322  * indication of the translation having failed, only that the fast
1323 	 * path did not succeed, *unless* it indicates an S1 permission or
1324  * access fault.
1325  */
1326 static u64 __kvm_at_s1e01_fast(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
1327 {
1328 	struct mmu_config config;
1329 	struct kvm_s2_mmu *mmu;
1330 	bool fail, mmu_cs;
1331 	u64 par;
1332 
1333 	par = SYS_PAR_EL1_F;
1334 
1335 	/*
1336 	 * We've trapped, so everything is live on the CPU. As we will
1337 	 * be switching contexts behind everybody's back, disable
1338 	 * interrupts while holding the mmu lock.
1339 	 */
1340 	guard(write_lock_irqsave)(&vcpu->kvm->mmu_lock);
1341 
1342 	/*
1343 	 * If HCR_EL2.{E2H,TGE} == {1,1}, the MMU context is already
1344 	 * the right one (as we trapped from vEL2). If not, save the
1345 	 * full MMU context.
1346 	 *
1347 	 * We are also guaranteed to be in the correct context if
1348 	 * we're not in a nested VM.
1349 	 */
1350 	mmu_cs = (vcpu_has_nv(vcpu) &&
1351 		  !(vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)));
1352 	if (!mmu_cs)
1353 		goto skip_mmu_switch;
1354 
1355 	/*
1356 	 * Obtaining the S2 MMU for an L2 is horribly racy, and we may not
1357 	 * find it (recycled by another vcpu, for example). When this
1358 	 * happens, admit defeat immediately and use the SW (slow) path.
1359 	 */
1360 	mmu = lookup_s2_mmu(vcpu);
1361 	if (!mmu)
1362 		return par;
1363 
1364 	__mmu_config_save(&config);
1365 
1366 	write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR0_EL1),	SYS_TTBR0);
1367 	write_sysreg_el1(vcpu_read_sys_reg(vcpu, TTBR1_EL1),	SYS_TTBR1);
1368 	write_sysreg_el1(vcpu_read_sys_reg(vcpu, TCR_EL1),	SYS_TCR);
1369 	write_sysreg_el1(vcpu_read_sys_reg(vcpu, MAIR_EL1),	SYS_MAIR);
1370 	if (kvm_has_tcr2(vcpu->kvm)) {
1371 		write_sysreg_el1(vcpu_read_sys_reg(vcpu, TCR2_EL1), SYS_TCR2);
1372 		if (kvm_has_s1pie(vcpu->kvm)) {
1373 			write_sysreg_el1(vcpu_read_sys_reg(vcpu, PIR_EL1), SYS_PIR);
1374 			write_sysreg_el1(vcpu_read_sys_reg(vcpu, PIRE0_EL1), SYS_PIRE0);
1375 		}
1376 		if (kvm_has_s1poe(vcpu->kvm)) {
1377 			write_sysreg_el1(vcpu_read_sys_reg(vcpu, POR_EL1), SYS_POR);
1378 			write_sysreg_s(vcpu_read_sys_reg(vcpu, POR_EL0), SYS_POR_EL0);
1379 		}
1380 	}
1381 	write_sysreg_el1(vcpu_read_sys_reg(vcpu, SCTLR_EL1),	SYS_SCTLR);
1382 	__load_stage2(mmu, mmu->arch);
1383 
1384 skip_mmu_switch:
1385 	/* Temporarily switch back to guest context */
1386 	write_sysreg_hcr(vcpu->arch.hcr_el2);
1387 	isb();
1388 
1389 	switch (op) {
1390 	case OP_AT_S1E1RP:
1391 	case OP_AT_S1E1WP:
1392 		fail = at_s1e1p_fast(vcpu, op, vaddr);
1393 		break;
1394 	case OP_AT_S1E1R:
1395 		fail = __kvm_at(OP_AT_S1E1R, vaddr);
1396 		break;
1397 	case OP_AT_S1E1W:
1398 		fail = __kvm_at(OP_AT_S1E1W, vaddr);
1399 		break;
1400 	case OP_AT_S1E0R:
1401 		fail = __kvm_at(OP_AT_S1E0R, vaddr);
1402 		break;
1403 	case OP_AT_S1E0W:
1404 		fail = __kvm_at(OP_AT_S1E0W, vaddr);
1405 		break;
1406 	case OP_AT_S1E1A:
1407 		fail = __kvm_at(OP_AT_S1E1A, vaddr);
1408 		break;
1409 	default:
1410 		WARN_ON_ONCE(1);
1411 		fail = true;
1412 		break;
1413 	}
1414 
1415 	if (!fail)
1416 		par = read_sysreg_par();
1417 
1418 	write_sysreg_hcr(HCR_HOST_VHE_FLAGS);
1419 
1420 	if (mmu_cs)
1421 		__mmu_config_restore(&config);
1422 
1423 	return par;
1424 }
1425 
1426 static bool par_check_s1_perm_fault(u64 par)
1427 {
1428 	u8 fst = FIELD_GET(SYS_PAR_EL1_FST, par);
1429 
1430 	return  ((fst & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM &&
1431 		 !(par & SYS_PAR_EL1_S));
1432 }
1433 
1434 static bool par_check_s1_access_fault(u64 par)
1435 {
1436 	u8 fst = FIELD_GET(SYS_PAR_EL1_FST, par);
1437 
1438 	return  ((fst & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_ACCESS &&
1439 		 !(par & SYS_PAR_EL1_S));
1440 }
1441 
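/*
 * AT S1E0x/S1E1x emulation: try the fast path (run the AT instruction
 * on the CPU with the guest's MMU context loaded) and fall back to
 * the software walker if the hardware walk failed for any reason
 * other than a genuine S1 permission or access fault.
 */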
1442 int __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
1443 {
1444 	u64 par = __kvm_at_s1e01_fast(vcpu, op, vaddr);
1445 	int ret;
1446 
1447 	/*
1448 	 * If PAR_EL1 reports that AT failed on an S1 permission or access
1449 	 * fault, we know for sure that the PTW was able to walk the S1
1450 	 * tables and there's nothing else to do.
1451 	 *
1452 	 * If AT failed for any other reason, then we must walk the guest S1
1453 	 * to emulate the instruction.
1454 	 */
1455 	if ((par & SYS_PAR_EL1_F) &&
1456 	    !par_check_s1_perm_fault(par) &&
1457 	    !par_check_s1_access_fault(par)) {
1458 		ret = handle_at_slow(vcpu, op, vaddr, &par);
1459 		if (ret)
1460 			return ret;
1461 	}
1462 
1463 	vcpu_write_sys_reg(vcpu, par, PAR_EL1);
1464 	return 0;
1465 }
1466 
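/*
 * AT S1E2 emulation: run the corresponding S1E1 instruction with
 * HCR_EL2 temporarily adjusted (TGE cleared, VM set, plus NV/NV1 for
 * a non-VHE guest hypervisor) so that the walk uses the guest's EL2
 * translation context, falling back to the software walker on
 * failure.
 */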
1467 int __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
1468 {
1469 	u64 par;
1470 	int ret;
1471 
1472 	/*
1473 	 * We've trapped, so everything is live on the CPU. As we will be
1474 	 * switching context behind everybody's back, disable interrupts...
1475 	 */
1476 	scoped_guard(write_lock_irqsave, &vcpu->kvm->mmu_lock) {
1477 		u64 val, hcr;
1478 		bool fail;
1479 
1480 		val = hcr = read_sysreg(hcr_el2);
1481 		val &= ~HCR_TGE;
1482 		val |= HCR_VM;
1483 
1484 		if (!vcpu_el2_e2h_is_set(vcpu))
1485 			val |= HCR_NV | HCR_NV1;
1486 
1487 		write_sysreg_hcr(val);
1488 		isb();
1489 
1490 		par = SYS_PAR_EL1_F;
1491 
1492 		switch (op) {
1493 		case OP_AT_S1E2R:
1494 			fail = __kvm_at(OP_AT_S1E1R, vaddr);
1495 			break;
1496 		case OP_AT_S1E2W:
1497 			fail = __kvm_at(OP_AT_S1E1W, vaddr);
1498 			break;
1499 		case OP_AT_S1E2A:
1500 			fail = __kvm_at(OP_AT_S1E1A, vaddr);
1501 			break;
1502 		default:
1503 			WARN_ON_ONCE(1);
1504 			fail = true;
1505 		}
1506 
1507 		isb();
1508 
1509 		if (!fail)
1510 			par = read_sysreg_par();
1511 
1512 		write_sysreg_hcr(hcr);
1513 		isb();
1514 	}
1515 
1516 	/* We failed the translation, let's replay it in slow motion */
1517 	if ((par & SYS_PAR_EL1_F) && !par_check_s1_perm_fault(par)) {
1518 		ret = handle_at_slow(vcpu, op, vaddr, &par);
1519 		if (ret)
1520 			return ret;
1521 	}
1522 
1523 	vcpu_write_sys_reg(vcpu, par, PAR_EL1);
1524 	return 0;
1525 }
1526 
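/*
 * AT S12E* emulation: perform the stage-1 translation with the
 * corresponding S1E* operation, then run the resulting IPA through
 * the emulated stage-2 walker and combine both results into PAR_EL1.
 */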
1527 int __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
1528 {
1529 	struct kvm_s2_trans out = {};
1530 	u64 ipa, par;
1531 	bool write;
1532 	int ret;
1533 
1534 	/* Do the stage-1 translation */
1535 	switch (op) {
1536 	case OP_AT_S12E1R:
1537 		op = OP_AT_S1E1R;
1538 		write = false;
1539 		break;
1540 	case OP_AT_S12E1W:
1541 		op = OP_AT_S1E1W;
1542 		write = true;
1543 		break;
1544 	case OP_AT_S12E0R:
1545 		op = OP_AT_S1E0R;
1546 		write = false;
1547 		break;
1548 	case OP_AT_S12E0W:
1549 		op = OP_AT_S1E0W;
1550 		write = true;
1551 		break;
1552 	default:
1553 		WARN_ON_ONCE(1);
1554 		return 0;
1555 	}
1556 
1557 	__kvm_at_s1e01(vcpu, op, vaddr);
1558 	par = vcpu_read_sys_reg(vcpu, PAR_EL1);
1559 	if (par & SYS_PAR_EL1_F)
1560 		return 0;
1561 
1562 	/*
1563 	 * If we only have a single stage of translation (EL2&0), exit
1564 	 * early. Same thing if {VM,DC}=={0,0}.
1565 	 */
1566 	if (compute_translation_regime(vcpu, op) == TR_EL20 ||
1567 	    !(vcpu_read_sys_reg(vcpu, HCR_EL2) & (HCR_VM | HCR_DC)))
1568 		return 0;
1569 
1570 	/* Do the stage-2 translation */
1571 	ipa = (par & GENMASK_ULL(47, 12)) | (vaddr & GENMASK_ULL(11, 0));
1572 	out.esr = 0;
1573 	ret = kvm_walk_nested_s2(vcpu, ipa, &out);
1574 	if (ret < 0)
1575 		return ret;
1576 
1577 	/* Check the access permission */
1578 	if (!out.esr &&
1579 	    ((!write && !out.readable) || (write && !out.writable)))
1580 		out.esr = ESR_ELx_FSC_PERM_L(out.level & 0x3);
1581 
1582 	par = compute_par_s12(vcpu, par, &out);
1583 	vcpu_write_sys_reg(vcpu, par, PAR_EL1);
1584 	return 0;
1585 }
1586 
1587 /*
1588  * Translate a VA for a given EL in a given translation regime, with
1589  * or without PAN. This requires wi->{regime, as_el0, pan} to be
1590  * set. The rest of the wi and wr should be 0-initialised.
1591  */
1592 int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
1593 		       struct s1_walk_result *wr, u64 va)
1594 {
1595 	int ret;
1596 
1597 	ret = setup_s1_walk(vcpu, wi, wr, va);
1598 	if (ret)
1599 		return ret;
1600 
1601 	if (wr->level == S1_MMU_DISABLED) {
1602 		wr->ur = wr->uw = wr->ux = true;
1603 		wr->pr = wr->pw = wr->px = true;
1604 	} else {
1605 		ret = walk_s1(vcpu, wi, wr, va);
1606 		if (ret)
1607 			return ret;
1608 
1609 		compute_s1_permissions(vcpu, wi, wr);
1610 	}
1611 
1612 	return 0;
1613 }
1614 
1615 struct desc_match {
1616 	u64	ipa;
1617 	int	level;
1618 };
1619 
1620 static int match_s1_desc(struct s1_walk_context *ctxt, void *priv)
1621 {
1622 	struct desc_match *dm = priv;
1623 	u64 ipa = dm->ipa;
1624 
1625 	/* Use S1 granule alignment */
1626 	ipa &= GENMASK(51, ctxt->wi->pgshift);
1627 
1628 	/* Not the IPA we're looking for? Continue. */
1629 	if (ipa != ctxt->table_ipa)
1630 		return 0;
1631 
1632 	/* Note the level and interrupt the walk */
1633 	dm->level = ctxt->level;
1634 	return -EINTR;
1635 }
1636 
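/*
 * Find the level at which a given table IPA shows up in the stage-1
 * walk of @va, by running the walker with a filter that interrupts
 * the walk on the first match.
 */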
1637 int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa, int *level)
1638 {
1639 	struct desc_match dm = {
1640 		.ipa	= ipa,
1641 	};
1642 	struct s1_walk_info wi = {
1643 		.filter	= &(struct s1_walk_filter){
1644 			.fn	= match_s1_desc,
1645 			.priv	= &dm,
1646 		},
1647 		.as_el0	= false,
1648 		.pan	= false,
1649 	};
1650 	struct s1_walk_result wr = {};
1651 	int ret;
1652 
1653 	if (is_hyp_ctxt(vcpu))
1654 		wi.regime = vcpu_el2_e2h_is_set(vcpu) ? TR_EL20 : TR_EL2;
1655 	else
1656 		wi.regime = TR_EL10;
1657 
1658 	ret = setup_s1_walk(vcpu, &wi, &wr, va);
1659 	if (ret)
1660 		return ret;
1661 
1662 	/* We really expect the S1 MMU to be on here... */
1663 	if (WARN_ON_ONCE(wr.level == S1_MMU_DISABLED)) {
1664 		*level = 0;
1665 		return 0;
1666 	}
1667 
1668 	/* Walk the guest's PT, looking for a match along the way */
1669 	ret = walk_s1(vcpu, &wi, &wr, va);
1670 	switch (ret) {
1671 	case -EINTR:
1672 		/* We interrupted the walk on a match, return the level */
1673 		*level = dm.level;
1674 		return 0;
1675 	case 0:
1676 		/* The walk completed, we failed to find the entry */
1677 		return -ENOENT;
1678 	default:
1679 		/* Any other error... */
1680 		return ret;
1681 	}
1682 }
1683 
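/*
 * Atomically compare-and-swap a guest descriptor, either with the LSE
 * CAS instruction or with an exclusive load/store sequence, returning
 * -EAGAIN if the descriptor changed under our feet.
 */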
1684 static int __lse_swap_desc(u64 __user *ptep, u64 old, u64 new)
1685 {
1686 	u64 tmp = old;
1687 	int ret = 0;
1688 
1689 	uaccess_enable_privileged();
1690 
1691 	asm volatile(__LSE_PREAMBLE
1692 		     "1: cas	%[old], %[new], %[addr]\n"
1693 		     "2:\n"
1694 		     _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
1695 		     : [old] "+r" (old), [addr] "+Q" (*ptep), [ret] "+r" (ret)
1696 		     : [new] "r" (new)
1697 		     : "memory");
1698 
1699 	uaccess_disable_privileged();
1700 
1701 	if (ret)
1702 		return ret;
1703 	if (tmp != old)
1704 		return -EAGAIN;
1705 
1706 	return ret;
1707 }
1708 
1709 static int __llsc_swap_desc(u64 __user *ptep, u64 old, u64 new)
1710 {
1711 	int ret = 1;
1712 	u64 tmp;
1713 
1714 	uaccess_enable_privileged();
1715 
1716 	asm volatile("prfm	pstl1strm, %[addr]\n"
1717 		     "1: ldxr	%[tmp], %[addr]\n"
1718 		     "sub	%[tmp], %[tmp], %[old]\n"
1719 		     "cbnz	%[tmp], 3f\n"
1720 		     "2: stlxr	%w[ret], %[new], %[addr]\n"
1721 		     "3:\n"
1722 		     _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w[ret])
1723 		     _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w[ret])
1724 		     : [ret] "+r" (ret), [addr] "+Q" (*ptep), [tmp] "=&r" (tmp)
1725 		     : [old] "r" (old), [new] "r" (new)
1726 		     : "memory");
1727 
1728 	uaccess_disable_privileged();
1729 
1730 	/* STLXR didn't update the descriptor, or the compare failed */
1731 	if (ret == 1)
1732 		return -EAGAIN;
1733 
1734 	return ret;
1735 }
1736 
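/*
 * Swap a stage-1 descriptor in guest memory (currently only used to
 * set the Access flag): resolve the IPA to a writable userspace
 * mapping and perform the compare-and-swap there, marking the page
 * dirty on success.
 */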
1737 int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new)
1738 {
1739 	struct kvm_memory_slot *slot;
1740 	unsigned long hva;
1741 	u64 __user *ptep;
1742 	bool writable;
1743 	int offset;
1744 	gfn_t gfn;
1745 	int r;
1746 
1747 	lockdep_assert(srcu_read_lock_held(&kvm->srcu));
1748 
1749 	gfn = ipa >> PAGE_SHIFT;
1750 	offset = offset_in_page(ipa);
1751 	slot = gfn_to_memslot(kvm, gfn);
1752 	hva = gfn_to_hva_memslot_prot(slot, gfn, &writable);
1753 	if (kvm_is_error_hva(hva))
1754 		return -EINVAL;
1755 	if (!writable)
1756 		return -EPERM;
1757 
1758 	ptep = (u64 __user *)hva + offset;
1759 	if (cpus_have_final_cap(ARM64_HAS_LSE_ATOMICS))
1760 		r = __lse_swap_desc(ptep, old, new);
1761 	else
1762 		r = __llsc_swap_desc(ptep, old, new);
1763 
1764 	if (r < 0)
1765 		return r;
1766 
1767 	mark_page_dirty_in_slot(kvm, slot, gfn);
1768 	return 0;
1769 }
1770