xref: /linux/arch/powerpc/include/asm/simple_spinlock.h (revision cdd5b5a9761fd66d17586e4f4ba6588c70e640ea)
112d0b9d6SNicholas Piggin /* SPDX-License-Identifier: GPL-2.0-or-later */
212d0b9d6SNicholas Piggin #ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
312d0b9d6SNicholas Piggin #define _ASM_POWERPC_SIMPLE_SPINLOCK_H
412d0b9d6SNicholas Piggin 
512d0b9d6SNicholas Piggin /*
612d0b9d6SNicholas Piggin  * Simple spin lock operations.
712d0b9d6SNicholas Piggin  *
812d0b9d6SNicholas Piggin  * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
912d0b9d6SNicholas Piggin  * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
1012d0b9d6SNicholas Piggin  * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
1112d0b9d6SNicholas Piggin  *	Rework to support virtual processors
1212d0b9d6SNicholas Piggin  *
1312d0b9d6SNicholas Piggin  * Type of int is used as a full 64b word is not necessary.
1412d0b9d6SNicholas Piggin  *
1512d0b9d6SNicholas Piggin  * (the type definitions are in asm/simple_spinlock_types.h)
1612d0b9d6SNicholas Piggin  */
1712d0b9d6SNicholas Piggin #include <linux/irqflags.h>
18*396f2b01SChristophe Leroy #include <linux/kcsan-checks.h>
1912d0b9d6SNicholas Piggin #include <asm/paravirt.h>
2012d0b9d6SNicholas Piggin #include <asm/paca.h>
2112d0b9d6SNicholas Piggin #include <asm/synch.h>
2212d0b9d6SNicholas Piggin #include <asm/ppc-opcode.h>
2312d0b9d6SNicholas Piggin 
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
/*
 * NOTE(review): little-endian reads the token through paca_index —
 * presumably the paca layout makes this alias the same 32-bit word as
 * lock_token on BE; verify against struct paca_struct.
 */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
/* 32-bit: no paca; any non-zero value marks the lock as held. */
#define LOCK_TOKEN	1
#endif
3412d0b9d6SNicholas Piggin 
arch_spin_value_unlocked(arch_spinlock_t lock)3512d0b9d6SNicholas Piggin static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
3612d0b9d6SNicholas Piggin {
3712d0b9d6SNicholas Piggin 	return lock.slock == 0;
3812d0b9d6SNicholas Piggin }
3912d0b9d6SNicholas Piggin 
/*
 * Report whether the lock is currently held, based on a single racy
 * snapshot of the lock word (READ_ONCE prevents load tearing/refetch).
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	arch_spinlock_t val = READ_ONCE(*lock);

	return !arch_spin_value_unlocked(val);
}
4412d0b9d6SNicholas Piggin 
4512d0b9d6SNicholas Piggin /*
4612d0b9d6SNicholas Piggin  * This returns the old value in the lock, so we succeeded
4712d0b9d6SNicholas Piggin  * in getting the lock if the return value is 0.
4812d0b9d6SNicholas Piggin  */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;
	/*
	 * EH (exclusive-access hint) operand for lwarx; only encoded as 1
	 * on 64-bit (the hint is presumably unsupported/reserved on 32-bit
	 * — set from CONFIG_PPC64 here).
	 */
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	token = LOCK_TOKEN;
	/*
	 * Load-reserve the lock word; if it is 0 (free), store-conditional
	 * our lock token, retrying from the load if the reservation was
	 * lost.  On success the acquire barrier orders the lock acquisition
	 * before any access in the critical section.  Returns the value
	 * observed in the lock: 0 means we took it.
	 */
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,%[eh]\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp;
}
6912d0b9d6SNicholas Piggin 
/* Returns non-zero when the lock was successfully acquired. */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return !__arch_spin_trylock(lock);
}
7412d0b9d6SNicholas Piggin 
7512d0b9d6SNicholas Piggin /*
7612d0b9d6SNicholas Piggin  * On a system with shared processors (that is, where a physical
7712d0b9d6SNicholas Piggin  * processor is multiplexed between several virtual processors),
7812d0b9d6SNicholas Piggin  * there is no point spinning on a lock if the holder of the lock
7912d0b9d6SNicholas Piggin  * isn't currently scheduled on a physical processor.  Instead
8012d0b9d6SNicholas Piggin  * we detect this situation and ask the hypervisor to give the
8112d0b9d6SNicholas Piggin  * rest of our timeslice to the lock holder.
8212d0b9d6SNicholas Piggin  *
8312d0b9d6SNicholas Piggin  * So that we can tell which virtual processor is holding a lock,
8412d0b9d6SNicholas Piggin  * we put 0x80000000 | smp_processor_id() in the lock when it is
8512d0b9d6SNicholas Piggin  * held.  Conveniently, we have a word in the paca that holds this
8612d0b9d6SNicholas Piggin  * value.
8712d0b9d6SNicholas Piggin  */
8812d0b9d6SNicholas Piggin 
8912d0b9d6SNicholas Piggin #if defined(CONFIG_PPC_SPLPAR)
9012d0b9d6SNicholas Piggin /* We only yield to the hypervisor if we are in shared processor mode */
9112d0b9d6SNicholas Piggin void splpar_spin_yield(arch_spinlock_t *lock);
9212d0b9d6SNicholas Piggin void splpar_rw_yield(arch_rwlock_t *lock);
9312d0b9d6SNicholas Piggin #else /* SPLPAR */
/* Not a shared-processor LPAR: yielding to the hypervisor is a no-op. */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
9612d0b9d6SNicholas Piggin #endif
9712d0b9d6SNicholas Piggin 
/*
 * Donate the rest of our timeslice to the spinlock holder when running
 * on a shared (virtualised) processor; otherwise just act as a
 * compiler barrier so the caller's spin loop re-reads memory.
 */
static inline void spin_yield(arch_spinlock_t *lock)
{
	if (!is_shared_processor()) {
		barrier();
		return;
	}

	splpar_spin_yield(lock);
}
10512d0b9d6SNicholas Piggin 
/*
 * rwlock counterpart of spin_yield(): yield to the hypervisor on shared
 * processors, otherwise only a compiler barrier.
 */
static inline void rw_yield(arch_rwlock_t *lock)
{
	if (!is_shared_processor()) {
		barrier();
		return;
	}

	splpar_rw_yield(lock);
}
11312d0b9d6SNicholas Piggin 
/*
 * Acquire the lock.  After a failed trylock, spin reading the lock word
 * at low SMT priority (yielding to the hypervisor on shared processors)
 * until it looks free, then raise priority and retry the atomic trylock.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	for (;;) {
		if (likely(!__arch_spin_trylock(lock)))
			return;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock));
		HMT_medium();
	}
}
12712d0b9d6SNicholas Piggin 
/*
 * Release the lock: the release barrier orders every access in the
 * critical section before the plain store that clears the lock word.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Tell KCSAN this unlock provides the ordering of a full barrier. */
	kcsan_mb();
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
13512d0b9d6SNicholas Piggin 
13612d0b9d6SNicholas Piggin /*
13712d0b9d6SNicholas Piggin  * Read-write spinlocks, allowing multiple readers
13812d0b9d6SNicholas Piggin  * but only one writer.
13912d0b9d6SNicholas Piggin  *
14012d0b9d6SNicholas Piggin  * NOTE! it is quite common to have readers in interrupts
14112d0b9d6SNicholas Piggin  * but no interrupt writers. For those circumstances we
14212d0b9d6SNicholas Piggin  * can "mix" irq-safe locks - any writer needs to get a
14312d0b9d6SNicholas Piggin  * irq-safe write-lock, but readers can get non-irqsafe
14412d0b9d6SNicholas Piggin  * read-locks.
14512d0b9d6SNicholas Piggin  */
14612d0b9d6SNicholas Piggin 
#ifdef CONFIG_PPC64
/*
 * lwarx loads 32 bits into a 64-bit register; sign-extend so the
 * "negative means write-locked" tests on a long work on 64-bit.
 */
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
15412d0b9d6SNicholas Piggin 
15512d0b9d6SNicholas Piggin /*
15612d0b9d6SNicholas Piggin  * This returns the old value in the lock + 1,
15712d0b9d6SNicholas Piggin  * so we got a read lock if the return value is > 0.
15812d0b9d6SNicholas Piggin  */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;
	/* EH hint for lwarx — only encoded on 64-bit (see __arch_spin_trylock). */
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	/*
	 * Load-reserve the lock word (sign-extended on 64-bit), increment
	 * the reader count, and bail out if the result is <= 0 — i.e. the
	 * lock was write-held (negative token).  Otherwise store-conditional
	 * the new count, retrying if the reservation was lost; success is
	 * followed by an acquire barrier.  Returns old value + 1, so > 0
	 * means the read lock was taken.
	 */
	__asm__ __volatile__(
"1:	lwarx		%0,0,%1,%[eh]\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock), [eh] "n" (eh)
	: "cr0", "xer", "memory");

	return tmp;
}
17812d0b9d6SNicholas Piggin 
17912d0b9d6SNicholas Piggin /*
18012d0b9d6SNicholas Piggin  * This returns the old value in the lock,
18112d0b9d6SNicholas Piggin  * so we got the write lock if the return value is 0.
18212d0b9d6SNicholas Piggin  */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;
	/* EH hint for lwarx — only encoded on 64-bit (see __arch_spin_trylock). */
	unsigned int eh = IS_ENABLED(CONFIG_PPC64);

	token = WRLOCK_TOKEN;
	/*
	 * Load-reserve the lock word; only if it is 0 (no readers, no
	 * writer) store-conditional the negative write token, retrying if
	 * the reservation was lost.  Acquire barrier on success.  Returns
	 * the observed lock value: 0 means we took the write lock.
	 */
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2,%[eh]\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock), [eh] "n" (eh)
	: "cr0", "memory");

	return tmp;
}
20212d0b9d6SNicholas Piggin 
/*
 * Take a read lock.  After a failed attempt, spin at low SMT priority
 * (yielding on shared processors) while a writer holds the lock
 * (negative value), then retry the atomic increment.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	for (;;) {
		if (likely(__arch_read_trylock(rw) > 0))
			return;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}
21612d0b9d6SNicholas Piggin 
/*
 * Take the write lock.  After a failed attempt, spin at low SMT
 * priority (yielding on shared processors) while any reader or writer
 * holds the lock (non-zero value), then retry the atomic claim.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	for (;;) {
		if (likely(!__arch_write_trylock(rw)))
			return;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock));
		HMT_medium();
	}
}
23012d0b9d6SNicholas Piggin 
/* Returns non-zero when the read lock was successfully acquired. */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	long newval = __arch_read_trylock(rw);

	return newval > 0;
}
23512d0b9d6SNicholas Piggin 
/* Returns non-zero when the write lock was successfully acquired. */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return !__arch_write_trylock(rw);
}
24012d0b9d6SNicholas Piggin 
/*
 * Drop a read lock: release barrier first, then atomically decrement
 * the reader count with a lwarx/stwcx. retry loop (a plain store would
 * race with concurrent readers).
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}
25612d0b9d6SNicholas Piggin 
/*
 * Drop the write lock: release barrier, then a plain store of 0 (safe
 * because the single writer owns the word — no RMW needed here).
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}
26312d0b9d6SNicholas Piggin 
/* Relax hooks for contended spins: yield to the hypervisor if shared. */
#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)
26712d0b9d6SNicholas Piggin 
26812d0b9d6SNicholas Piggin #endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
269