xref: /linux/arch/powerpc/include/asm/book3s/64/kup.h (revision 7613f5a66becfd0e43a0f34de8518695888f5458)
13b47b754SAneesh Kumar K.V /* SPDX-License-Identifier: GPL-2.0 */
23b47b754SAneesh Kumar K.V #ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
33b47b754SAneesh Kumar K.V #define _ASM_POWERPC_BOOK3S_64_KUP_H
43b47b754SAneesh Kumar K.V 
53b47b754SAneesh Kumar K.V #include <linux/const.h>
63b47b754SAneesh Kumar K.V #include <asm/reg.h>
73b47b754SAneesh Kumar K.V 
8fa46c2faSAneesh Kumar K.V #define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
9fa46c2faSAneesh Kumar K.V #define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
10292f86c4SAneesh Kumar K.V #define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
113b47b754SAneesh Kumar K.V #define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
123b47b754SAneesh Kumar K.V 
133b47b754SAneesh Kumar K.V #ifdef __ASSEMBLY__
143b47b754SAneesh Kumar K.V 
/*
 * kuap_user_restore - restore the user AMR/IAMR values saved in pt_regs
 * on the stack when returning to userspace.
 *
 * \gpr1, \gpr2: scratch GPRs (clobbered).
 *
 * The whole macro is skipped (branch to 100:) unless MMU_FTR_PKEY is
 * enabled.  When KUAP/KUEP is off, the mtspr is additionally avoided if
 * the current SPR value already matches the saved one.
 */
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If kuap feature is not enabled, do the mtspr
	 * only if AMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If kuep feature is not enabled, do the mtspr
	 * only if IAMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: //skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm
618e560921SAneesh Kumar K.V 
/*
 * kuap_kernel_restore - restore the AMR saved in pt_regs on the stack
 * when returning to kernel space.
 *
 * \gpr1, \gpr2: scratch GPRs (clobbered).
 *
 * Only active when MMU_FTR_BOOK3S_KUAP is set; IAMR is deliberately not
 * touched on a return to the kernel.
 */
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see kuap_kernel_restore()
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
843b47b754SAneesh Kumar K.V 
#ifdef CONFIG_PPC_KUAP
/*
 * kuap_check_amr - debug-only (CONFIG_PPC_KUAP_DEBUG) assertion that the
 * kernel AMR currently holds AMR_KUAP_BLOCKED; emits a WARN-once bug
 * entry otherwise.  Compiled out unless MMU_FTR_BOOK3S_KUAP is set.
 *
 * \gpr1, \gpr2: scratch GPRs (clobbered).
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
983b47b754SAneesh Kumar K.V 
998e560921SAneesh Kumar K.V /*
1008e560921SAneesh Kumar K.V  *	if (pkey) {
1018e560921SAneesh Kumar K.V  *
1028e560921SAneesh Kumar K.V  *		save AMR -> stack;
1038e560921SAneesh Kumar K.V  *		if (kuap) {
1048e560921SAneesh Kumar K.V  *			if (AMR != BLOCKED)
1058e560921SAneesh Kumar K.V  *				KUAP_BLOCKED -> AMR;
1068e560921SAneesh Kumar K.V  *		}
1078e560921SAneesh Kumar K.V  *		if (from_user) {
1088e560921SAneesh Kumar K.V  *			save IAMR -> stack;
1098e560921SAneesh Kumar K.V  *			if (kuep) {
 *				KUEP_BLOCKED -> IAMR;
1118e560921SAneesh Kumar K.V  *			}
1128e560921SAneesh Kumar K.V  *		}
1138e560921SAneesh Kumar K.V  *		return;
1148e560921SAneesh Kumar K.V  *	}
1158e560921SAneesh Kumar K.V  *
1168e560921SAneesh Kumar K.V  *	if (kuap) {
1178e560921SAneesh Kumar K.V  *		if (from_kernel) {
1188e560921SAneesh Kumar K.V  *			save AMR -> stack;
1198e560921SAneesh Kumar K.V  *			if (AMR != BLOCKED)
1208e560921SAneesh Kumar K.V  *				KUAP_BLOCKED -> AMR;
1218e560921SAneesh Kumar K.V  *		}
1228e560921SAneesh Kumar K.V  *
1238e560921SAneesh Kumar K.V  *	}
1248e560921SAneesh Kumar K.V  */
/*
 * kuap_save_amr_and_lock - on interrupt entry, save the current AMR (and,
 * when entering from userspace, IAMR) into pt_regs on the stack and move
 * the SPRs to their blocked kernel values.
 *
 * \gpr1, \gpr2: scratch GPRs (clobbered).
 * \use_cr:      CR field used for the AMR compare.
 * \msr_pr_cr:   optional CR field holding the MSR_PR (from-user) test
 *               result; when omitted the from-user paths are assembled out.
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * if both pkey and kuap are disabled, nothing to do
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * if pkey is disabled and we are entering from userspace
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing AMR outside the kernel
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
        END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled or pkey is disabled but entering from kernel
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * update kernel AMR with AMR_KUAP_BLOCKED only
	 * if KUAP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an interrupt
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * if entering from kernel we don't need to save IAMR
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * update kernel IAMR with AMR_KUEP_BLOCKED only
	 * if KUEP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm
1933b47b754SAneesh Kumar K.V 
1943b47b754SAneesh Kumar K.V #else /* !__ASSEMBLY__ */
1953b47b754SAneesh Kumar K.V 
1963b47b754SAneesh Kumar K.V #include <linux/jump_label.h>
1973b47b754SAneesh Kumar K.V 
1983b47b754SAneesh Kumar K.V DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
1993b47b754SAneesh Kumar K.V 
2008e560921SAneesh Kumar K.V #ifdef CONFIG_PPC_PKEY
2013b47b754SAneesh Kumar K.V 
2023b47b754SAneesh Kumar K.V #include <asm/mmu.h>
2033b47b754SAneesh Kumar K.V #include <asm/ptrace.h>
2043b47b754SAneesh Kumar K.V 
20548a8ab4eSAneesh Kumar K.V /*
20648a8ab4eSAneesh Kumar K.V  * For kernel thread that doesn't have thread.regs return
20748a8ab4eSAneesh Kumar K.V  * default AMR/IAMR values.
20848a8ab4eSAneesh Kumar K.V  */
20948a8ab4eSAneesh Kumar K.V static inline u64 current_thread_amr(void)
21048a8ab4eSAneesh Kumar K.V {
21148a8ab4eSAneesh Kumar K.V 	if (current->thread.regs)
21248a8ab4eSAneesh Kumar K.V 		return current->thread.regs->amr;
21348a8ab4eSAneesh Kumar K.V 	return AMR_KUAP_BLOCKED;
21448a8ab4eSAneesh Kumar K.V }
21548a8ab4eSAneesh Kumar K.V 
21648a8ab4eSAneesh Kumar K.V static inline u64 current_thread_iamr(void)
21748a8ab4eSAneesh Kumar K.V {
21848a8ab4eSAneesh Kumar K.V 	if (current->thread.regs)
21948a8ab4eSAneesh Kumar K.V 		return current->thread.regs->iamr;
22048a8ab4eSAneesh Kumar K.V 	return AMR_KUEP_BLOCKED;
22148a8ab4eSAneesh Kumar K.V }
22248a8ab4eSAneesh Kumar K.V #endif /* CONFIG_PPC_PKEY */
22348a8ab4eSAneesh Kumar K.V 
22448a8ab4eSAneesh Kumar K.V #ifdef CONFIG_PPC_KUAP
22548a8ab4eSAneesh Kumar K.V 
2268e560921SAneesh Kumar K.V static inline void kuap_user_restore(struct pt_regs *regs)
2273b47b754SAneesh Kumar K.V {
228ec0f9b98SAneesh Kumar K.V 	bool restore_amr = false, restore_iamr = false;
229ec0f9b98SAneesh Kumar K.V 	unsigned long amr, iamr;
230ec0f9b98SAneesh Kumar K.V 
2318e560921SAneesh Kumar K.V 	if (!mmu_has_feature(MMU_FTR_PKEY))
2328e560921SAneesh Kumar K.V 		return;
2338e560921SAneesh Kumar K.V 
234ec0f9b98SAneesh Kumar K.V 	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
235ec0f9b98SAneesh Kumar K.V 		amr = mfspr(SPRN_AMR);
236ec0f9b98SAneesh Kumar K.V 		if (amr != regs->amr)
237ec0f9b98SAneesh Kumar K.V 			restore_amr = true;
238ec0f9b98SAneesh Kumar K.V 	} else {
239ec0f9b98SAneesh Kumar K.V 		restore_amr = true;
240ec0f9b98SAneesh Kumar K.V 	}
241ec0f9b98SAneesh Kumar K.V 
242ec0f9b98SAneesh Kumar K.V 	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
243ec0f9b98SAneesh Kumar K.V 		iamr = mfspr(SPRN_IAMR);
244ec0f9b98SAneesh Kumar K.V 		if (iamr != regs->iamr)
245ec0f9b98SAneesh Kumar K.V 			restore_iamr = true;
246ec0f9b98SAneesh Kumar K.V 	} else {
247ec0f9b98SAneesh Kumar K.V 		restore_iamr = true;
248ec0f9b98SAneesh Kumar K.V 	}
249ec0f9b98SAneesh Kumar K.V 
250ec0f9b98SAneesh Kumar K.V 
251ec0f9b98SAneesh Kumar K.V 	if (restore_amr || restore_iamr) {
2523b47b754SAneesh Kumar K.V 		isync();
253ec0f9b98SAneesh Kumar K.V 		if (restore_amr)
2548e560921SAneesh Kumar K.V 			mtspr(SPRN_AMR, regs->amr);
255ec0f9b98SAneesh Kumar K.V 		if (restore_iamr)
2568e560921SAneesh Kumar K.V 			mtspr(SPRN_IAMR, regs->iamr);
257ec0f9b98SAneesh Kumar K.V 	}
2583b47b754SAneesh Kumar K.V 	/*
2598e560921SAneesh Kumar K.V 	 * No isync required here because we are about to rfi
2608e560921SAneesh Kumar K.V 	 * back to previous context before any user accesses
2618e560921SAneesh Kumar K.V 	 * would be made, which is a CSI.
2623b47b754SAneesh Kumar K.V 	 */
2633b47b754SAneesh Kumar K.V }
264ec0f9b98SAneesh Kumar K.V 
2658e560921SAneesh Kumar K.V static inline void kuap_kernel_restore(struct pt_regs *regs,
2668e560921SAneesh Kumar K.V 					   unsigned long amr)
2678e560921SAneesh Kumar K.V {
2688e560921SAneesh Kumar K.V 	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
2698e560921SAneesh Kumar K.V 		if (unlikely(regs->amr != amr)) {
2708e560921SAneesh Kumar K.V 			isync();
2718e560921SAneesh Kumar K.V 			mtspr(SPRN_AMR, regs->amr);
2728e560921SAneesh Kumar K.V 			/*
2738e560921SAneesh Kumar K.V 			 * No isync required here because we are about to rfi
2748e560921SAneesh Kumar K.V 			 * back to previous context before any user accesses
2758e560921SAneesh Kumar K.V 			 * would be made, which is a CSI.
2768e560921SAneesh Kumar K.V 			 */
2778e560921SAneesh Kumar K.V 		}
2788e560921SAneesh Kumar K.V 	}
2798e560921SAneesh Kumar K.V 	/*
2808e560921SAneesh Kumar K.V 	 * No need to restore IAMR when returning to kernel space.
2818e560921SAneesh Kumar K.V 	 */
2823b47b754SAneesh Kumar K.V }
2833b47b754SAneesh Kumar K.V 
2843b47b754SAneesh Kumar K.V static inline unsigned long kuap_get_and_check_amr(void)
2853b47b754SAneesh Kumar K.V {
286d5b810b5SAneesh Kumar K.V 	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
2873b47b754SAneesh Kumar K.V 		unsigned long amr = mfspr(SPRN_AMR);
2883b47b754SAneesh Kumar K.V 		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
2893b47b754SAneesh Kumar K.V 			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
2903b47b754SAneesh Kumar K.V 		return amr;
2913b47b754SAneesh Kumar K.V 	}
2923b47b754SAneesh Kumar K.V 	return 0;
2933b47b754SAneesh Kumar K.V }
2943b47b754SAneesh Kumar K.V 
#else /* CONFIG_PPC_PKEY */

/* Without CONFIG_PPC_PKEY there is no AMR/IAMR state to save or restore. */
static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
}

/* No AMR to report; callers treat 0 as "nothing saved". */
static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0;
}

#endif /* CONFIG_PPC_PKEY */
3118e560921SAneesh Kumar K.V 
3128e560921SAneesh Kumar K.V 
3138e560921SAneesh Kumar K.V #ifdef CONFIG_PPC_KUAP
3148e560921SAneesh Kumar K.V 
3153b47b754SAneesh Kumar K.V static inline void kuap_check_amr(void)
3163b47b754SAneesh Kumar K.V {
317d5b810b5SAneesh Kumar K.V 	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
3183b47b754SAneesh Kumar K.V 		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
3193b47b754SAneesh Kumar K.V }
3203b47b754SAneesh Kumar K.V 
3213b47b754SAneesh Kumar K.V /*
3223b47b754SAneesh Kumar K.V  * We support individually allowing read or write, but we don't support nesting
3233b47b754SAneesh Kumar K.V  * because that would require an expensive read/modify write of the AMR.
3243b47b754SAneesh Kumar K.V  */
3253b47b754SAneesh Kumar K.V 
3263b47b754SAneesh Kumar K.V static inline unsigned long get_kuap(void)
3273b47b754SAneesh Kumar K.V {
3283b47b754SAneesh Kumar K.V 	/*
3293b47b754SAneesh Kumar K.V 	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
3303b47b754SAneesh Kumar K.V 	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
3313b47b754SAneesh Kumar K.V 	 * cause restore_user_access to do a flush.
3323b47b754SAneesh Kumar K.V 	 *
3333b47b754SAneesh Kumar K.V 	 * This has no effect in terms of actually blocking things on hash,
3343b47b754SAneesh Kumar K.V 	 * so it doesn't break anything.
3353b47b754SAneesh Kumar K.V 	 */
336*7613f5a6SMichael Ellerman 	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
3373b47b754SAneesh Kumar K.V 		return AMR_KUAP_BLOCKED;
3383b47b754SAneesh Kumar K.V 
3393b47b754SAneesh Kumar K.V 	return mfspr(SPRN_AMR);
3403b47b754SAneesh Kumar K.V }
3413b47b754SAneesh Kumar K.V 
3423b47b754SAneesh Kumar K.V static inline void set_kuap(unsigned long value)
3433b47b754SAneesh Kumar K.V {
344*7613f5a6SMichael Ellerman 	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
3453b47b754SAneesh Kumar K.V 		return;
3463b47b754SAneesh Kumar K.V 
3473b47b754SAneesh Kumar K.V 	/*
3483b47b754SAneesh Kumar K.V 	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
3493b47b754SAneesh Kumar K.V 	 * before and after the move to AMR. See table 6 on page 1134.
3503b47b754SAneesh Kumar K.V 	 */
3513b47b754SAneesh Kumar K.V 	isync();
3523b47b754SAneesh Kumar K.V 	mtspr(SPRN_AMR, value);
3533b47b754SAneesh Kumar K.V 	isync();
3543b47b754SAneesh Kumar K.V }
3553b47b754SAneesh Kumar K.V 
356eb232b16SAneesh Kumar K.V static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
357475c8749SAneesh Kumar K.V 				  bool is_write)
3583b47b754SAneesh Kumar K.V {
359eb232b16SAneesh Kumar K.V 	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
360eb232b16SAneesh Kumar K.V 		return false;
361eb232b16SAneesh Kumar K.V 	/*
362475c8749SAneesh Kumar K.V 	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
363475c8749SAneesh Kumar K.V 	 * For hash this will be a key fault (DSISR_KEYFAULT)
364eb232b16SAneesh Kumar K.V 	 */
365475c8749SAneesh Kumar K.V 	/*
366475c8749SAneesh Kumar K.V 	 * We do have exception table entry, but accessing the
367475c8749SAneesh Kumar K.V 	 * userspace results in fault.  This could be because we
368475c8749SAneesh Kumar K.V 	 * didn't unlock the AMR or access is denied by userspace
369475c8749SAneesh Kumar K.V 	 * using a key value that blocks access. We are only interested
370475c8749SAneesh Kumar K.V 	 * in catching the use case of accessing without unlocking
371475c8749SAneesh Kumar K.V 	 * the AMR. Hence check for BLOCK_WRITE/READ against AMR.
372475c8749SAneesh Kumar K.V 	 */
373475c8749SAneesh Kumar K.V 	if (is_write) {
3743dc12dfeSChristophe Leroy 		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
3753b47b754SAneesh Kumar K.V 	}
3763dc12dfeSChristophe Leroy 	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
377eb232b16SAneesh Kumar K.V }
3783b47b754SAneesh Kumar K.V 
3793b47b754SAneesh Kumar K.V static __always_inline void allow_user_access(void __user *to, const void __user *from,
3803b47b754SAneesh Kumar K.V 					      unsigned long size, unsigned long dir)
3813b47b754SAneesh Kumar K.V {
3824d6c551eSAneesh Kumar K.V 	unsigned long thread_amr = 0;
3834d6c551eSAneesh Kumar K.V 
3843b47b754SAneesh Kumar K.V 	// This is written so we can resolve to a single case at build time
3853b47b754SAneesh Kumar K.V 	BUILD_BUG_ON(!__builtin_constant_p(dir));
3864d6c551eSAneesh Kumar K.V 
3874d6c551eSAneesh Kumar K.V 	if (mmu_has_feature(MMU_FTR_PKEY))
3884d6c551eSAneesh Kumar K.V 		thread_amr = current_thread_amr();
3894d6c551eSAneesh Kumar K.V 
3903b47b754SAneesh Kumar K.V 	if (dir == KUAP_READ)
3914d6c551eSAneesh Kumar K.V 		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
3923b47b754SAneesh Kumar K.V 	else if (dir == KUAP_WRITE)
3934d6c551eSAneesh Kumar K.V 		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
3943b47b754SAneesh Kumar K.V 	else if (dir == KUAP_READ_WRITE)
3954d6c551eSAneesh Kumar K.V 		set_kuap(thread_amr);
3963b47b754SAneesh Kumar K.V 	else
3973b47b754SAneesh Kumar K.V 		BUILD_BUG();
3983b47b754SAneesh Kumar K.V }
3993b47b754SAneesh Kumar K.V 
#else /* CONFIG_PPC_KUAP */

/*
 * Stubs for !CONFIG_PPC_KUAP.  get_kuap() still returns AMR_KUAP_BLOCKED
 * so that prevent_user_access_return()/restore_user_access() keep
 * triggering the uaccess flush.
 */
static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */
4148e560921SAneesh Kumar K.V 
/*
 * Close the user-access window by fully blocking the AMR, then flush if
 * the uaccess-flush mitigation is enabled.  The to/from/size/dir
 * arguments are unused here; presumably kept for interface symmetry with
 * allow_user_access() — confirm against other KUAP implementations.
 */
static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}
4223b47b754SAneesh Kumar K.V 
4233b47b754SAneesh Kumar K.V static inline unsigned long prevent_user_access_return(void)
4243b47b754SAneesh Kumar K.V {
4253b47b754SAneesh Kumar K.V 	unsigned long flags = get_kuap();
4263b47b754SAneesh Kumar K.V 
4273b47b754SAneesh Kumar K.V 	set_kuap(AMR_KUAP_BLOCKED);
4283b47b754SAneesh Kumar K.V 	if (static_branch_unlikely(&uaccess_flush_key))
4293b47b754SAneesh Kumar K.V 		do_uaccess_flush();
4303b47b754SAneesh Kumar K.V 
4313b47b754SAneesh Kumar K.V 	return flags;
4323b47b754SAneesh Kumar K.V }
4333b47b754SAneesh Kumar K.V 
4343b47b754SAneesh Kumar K.V static inline void restore_user_access(unsigned long flags)
4353b47b754SAneesh Kumar K.V {
4363b47b754SAneesh Kumar K.V 	set_kuap(flags);
4373b47b754SAneesh Kumar K.V 	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
4383b47b754SAneesh Kumar K.V 		do_uaccess_flush();
4393b47b754SAneesh Kumar K.V }
4403b47b754SAneesh Kumar K.V #endif /* __ASSEMBLY__ */
4413b47b754SAneesh Kumar K.V 
4423b47b754SAneesh Kumar K.V #endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */
443