xref: /linux/arch/powerpc/include/asm/book3s/64/kup.h (revision 42e03bc5240b75007682d9941ef672d12828fc70)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

#define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
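
/*
 * Layout note (a reading of the ISA 3.0 AMR layout): the AMR holds a
 * two-bit field per pkey, the high bit of each pair disabling stores
 * and the low bit disabling loads; the IAMR uses the low bit of each
 * pair to disable instruction fetch. The BLOCK_READ/BLOCK_WRITE
 * patterns above set the relevant disable bit for the usable keys,
 * and AMR_KUAP_BLOCKED combines them to block both loads and stores.
 */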

#ifdef __ASSEMBLY__

.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If the kuap feature is not enabled, only do the
	 * mtspr if the AMR value differs.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If the kuep feature is not enabled, only do the
	 * mtspr if the IAMR value differs.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: // skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm

.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel, so compare and only do
	 * the mtspr if the value changed.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see kuap_user_restore().
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
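
/*
 * The C-side counterpart of this debug check is
 * __kuap_get_and_assert_locked() below, which likewise warns once when
 * the AMR is not AMR_KUAP_BLOCKED under CONFIG_PPC_KUAP_DEBUG.
 */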

/*
 *	if (pkey) {
 *
 *		save AMR -> stack;
 *		if (kuap) {
 *			if (AMR != BLOCKED)
 *				KUAP_BLOCKED -> AMR;
 *		}
 *		if (from_user) {
 *			save IAMR -> stack;
 *			if (kuep) {
 *				KUEP_BLOCKED -> IAMR;
 *			}
 *		}
 *		return;
 *	}
 *
 *	if (kuap) {
 *		if (from_kernel) {
 *			save AMR -> stack;
 *			if (AMR != BLOCKED)
 *				KUAP_BLOCKED -> AMR;
 *		}
 *	}
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * If both pkey and kuap are disabled, there is nothing to do.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * If pkey is disabled and we are entering from userspace,
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing the AMR outside the kernel,
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * Either pkey is enabled, or pkey is disabled but we are
	 * entering from the kernel.
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * Update the kernel AMR with AMR_KUAP_BLOCKED only
	 * if the KUAP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an interrupt.
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * If entering from the kernel we don't need to save the IAMR.
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * Update the kernel IAMR with AMR_KUEP_BLOCKED only
	 * if the KUEP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm
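
/*
 * A sketch of a call site for the macro above (registers and CR fields
 * here are illustrative; the real call sites live in the interrupt
 * entry assembly):
 *
 *	andi.	r10, r12, MSR_PR	// r12 holds the interrupted MSR; sets cr0
 *	kuap_save_amr_and_lock r9, r10, cr1, cr0
 *
 * Passing a CR field that records MSR_PR lets the macro skip the IAMR
 * save when an interrupt comes from the kernel; when \msr_pr_cr is
 * omitted, the IAMR handling drops out entirely.
 */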

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;

#include <asm/mmu.h>
#include <asm/ptrace.h>

/*
 * A kernel thread using kthread_use_mm() would ideally inherit the AMR
 * value of the address space it operates on. But the AMR value is
 * thread-specific: we inherit the address space, not the thread's
 * access restrictions. Because of this, ignore the thread AMR value
 * when accessing userspace from a kernel thread and use the defaults
 * instead.
 */
static inline u64 current_thread_amr(void)
{
	if (current->thread.regs)
		return current->thread.regs->amr;
	return default_amr;
}

static inline u64 current_thread_iamr(void)
{
	if (current->thread.regs)
		return current->thread.regs->iamr;
	return default_iamr;
}
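
/*
 * Illustrative example (not a call site in this header): a kernel
 * thread has no current->thread.regs, so after adopting a user mm the
 * helpers above fall back to default_amr/default_iamr instead of any
 * particular user thread's pkey restrictions:
 *
 *	kthread_use_mm(mm);
 *	ret = copy_from_user(buf, uptr, len);	// AMR taken from default_amr
 *	kthread_unuse_mm(mm);
 */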
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static __always_inline bool kuap_is_disabled(void)
{
	return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
}

static inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}

static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
	if (likely(regs->amr == amr))
		return;

	isync();
	mtspr(SPRN_AMR, regs->amr);
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 *
	 * No need to restore IAMR when returning to kernel space.
	 */
}

static inline unsigned long __kuap_get_and_assert_locked(void)
{
	unsigned long amr = mfspr(SPRN_AMR);

	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
		WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
	return amr;
}

/* Do nothing, book3s/64 does that in ASM */
static inline void __kuap_lock(void)
{
}

static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
}

/*
 * We support individually allowing read or write, but we don't support nesting
 * because that would require an expensive read/modify/write of the AMR.
 */
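
/*
 * A minimal sketch of the resulting contract (illustrative, using only
 * helpers defined below): a caller that may run with user access already
 * open saves the current AMR with prevent_user_access_return() and puts
 * it back with restore_user_access(), rather than nesting allow/prevent
 * pairs:
 *
 *	unsigned long flags = prevent_user_access_return();
 *	... code that must not be able to touch userspace ...
 *	restore_user_access(flags);
 */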

static inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}

static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	/*
	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
	 * For hash this will be a key fault (DSISR_KEYFAULT).
	 *
	 * We do have an exception table entry, but accessing userspace
	 * resulted in a fault. This could be because we didn't unlock
	 * the AMR, or because access is denied by a userspace key value
	 * that blocks access. We are only interested in catching the case
	 * of accessing without unlocking the AMR, hence check for
	 * BLOCK_WRITE/BLOCK_READ against the AMR.
	 */
	if (is_write)
		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}
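
/*
 * For example (illustrative; the generic wrappers live in asm/kup.h), a
 * read-only accessor passes a compile-time-constant direction, so the
 * if/else chain above folds down to a single set_kuap() that keeps
 * writes blocked while permitting reads under the thread's pkey
 * restrictions:
 *
 *	allow_user_access(NULL, from, size, KUAP_READ);
 */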

#else /* CONFIG_PPC_KUAP */

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */

static inline void prevent_user_access(unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */