/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone ticket spinlock implementation for use by the non-VHE
 * KVM hypervisor code running at EL2.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 *
 * Heavily based on the implementation removed by c11090474d70 which was:
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
#define __ARM64_KVM_NVHE_SPINLOCK_H__

#include <asm/alternative.h>
#include <asm/lse.h>
#include <asm/rwonce.h>

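/*
 * 32-bit ticket lock made up of two 16-bit halves: "owner" is the ticket
 * currently being served and "next" is the ticket that will be handed to
 * the next contender. The lock is free when owner == next. The field
 * order is flipped on big-endian so that, viewed as a u32, owner always
 * occupies the low half and next the high half, which is the in-register
 * layout the asm below relies on.
 */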
typedef union hyp_spinlock {
	u32	__val;
	struct {
#ifdef __AARCH64EB__
		u16 next, owner;
#else
		u16 owner, next;
#endif
	};
} hyp_spinlock_t;

#define __HYP_SPIN_LOCK_INITIALIZER \
	{ .__val = 0 }

#define __HYP_SPIN_LOCK_UNLOCKED \
	((hyp_spinlock_t) __HYP_SPIN_LOCK_INITIALIZER)

#define DEFINE_HYP_SPINLOCK(x)	hyp_spinlock_t x = __HYP_SPIN_LOCK_UNLOCKED
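
/*
 * Runtime initialiser for locks which cannot be defined with
 * DEFINE_HYP_SPINLOCK(), e.g. locks embedded in another structure.
 */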
#define hyp_spin_lock_init(l)						\
do {									\
	*(l) = __HYP_SPIN_LOCK_UNLOCKED;				\
} while (0)
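
/*
 * Illustrative usage only; "example_lock" and example_update() below are
 * made-up names, not part of this header:
 *
 *	static DEFINE_HYP_SPINLOCK(example_lock);
 *
 *	static void example_update(void)
 *	{
 *		hyp_spin_lock(&example_lock);
 *		...update data protected by example_lock...
 *		hyp_spin_unlock(&example_lock);
 *	}
 */

/*
 * Acquire @lock: atomically take the next ticket and, if the lock is
 * contended, wait in WFE until the owner ticket matches ours. Provides
 * acquire ordering for the critical section.
 */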
static inline void hyp_spin_lock(hyp_spinlock_t *lock)
{
	u32 tmp;
	hyp_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, #(1 << 16)\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, #(1 << 16)\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3))

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner)
	: "memory");
}

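/*
 * Release @lock: a store-release bumps the owner ticket, handing the
 * lock to the next waiter. The store also clears the waiters' exclusive
 * monitors, generating the event that wakes them from WFE in
 * hyp_spin_lock().
 */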
static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
{
	u64 tmp;

	asm volatile(
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

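/*
 * The lock is held whenever the owner and next tickets differ. Note
 * that this says nothing about which CPU, if any, holds the lock.
 */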
static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
{
	hyp_spinlock_t lockval = READ_ONCE(*lock);

	return lockval.owner != lockval.next;
}

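/*
 * Debug helper: with CONFIG_NVHE_EL2_DEBUG, BUG() if @lock is not held
 * (once pKVM is fully initialised, see below); otherwise a no-op.
 */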
#ifdef CONFIG_NVHE_EL2_DEBUG
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
{
	/*
	 * The __pkvm_init() path accesses protected data-structures without
	 * holding locks as the other CPUs are guaranteed to not enter EL2
	 * concurrently at this point in time. The point by which EL2 is
	 * initialized on all CPUs is reflected in the pkvm static key, so
	 * wait until it is set before checking the lock state.
	 */
	if (static_branch_likely(&kvm_protected_mode_initialized))
		BUG_ON(!hyp_spin_is_locked(lock));
}
#else
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { }
#endif

#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */