xref: /linux/arch/powerpc/include/asm/book3s/64/kup.h (revision 292f86c4c683a1064aff7210348da088c1573ee0)
13b47b754SAneesh Kumar K.V /* SPDX-License-Identifier: GPL-2.0 */
23b47b754SAneesh Kumar K.V #ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
33b47b754SAneesh Kumar K.V #define _ASM_POWERPC_BOOK3S_64_KUP_H
43b47b754SAneesh Kumar K.V 
53b47b754SAneesh Kumar K.V #include <linux/const.h>
63b47b754SAneesh Kumar K.V #include <asm/reg.h>
73b47b754SAneesh Kumar K.V 
8fa46c2faSAneesh Kumar K.V #define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
9fa46c2faSAneesh Kumar K.V #define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
10*292f86c4SAneesh Kumar K.V #define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
113b47b754SAneesh Kumar K.V #define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
123b47b754SAneesh Kumar K.V 
133b47b754SAneesh Kumar K.V #ifdef __ASSEMBLY__
143b47b754SAneesh Kumar K.V 
/*
 * Restore the user AMR/IAMR values that were saved in the stack frame at
 * interrupt entry, when returning to userspace.  Only active when the
 * MMU_FTR_PKEY feature is set, since only then do AMR/IAMR carry
 * per-task user values.
 */
.macro kuap_user_restore gpr1
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)
	isync
	mtspr	SPRN_AMR, \gpr1
	/*
	 * Restore IAMR only when returning to userspace
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)
	mtspr	SPRN_IAMR, \gpr1

	/* No isync required, see kuap_user_restore() */
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_PKEY, 67)
#endif
.endm
358e560921SAneesh Kumar K.V 
/*
 * Restore the kernel AMR from the stack frame when returning to kernel
 * space after an interrupt.  \gpr1 and \gpr2 are scratch registers.
 * The mtspr is skipped when the saved value already matches the live
 * AMR, which is the common case for kernel-to-kernel returns.
 */
.macro kuap_kernel_restore	gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see kuap_kernel_restore()
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
583b47b754SAneesh Kumar K.V 
#ifdef CONFIG_PPC_KUAP
/*
 * Debug-only sanity check (CONFIG_PPC_KUAP_DEBUG): trap-and-warn once
 * if the live AMR is not in the fully blocked state when it should be.
 * \gpr1 and \gpr2 are scratch registers.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
723b47b754SAneesh Kumar K.V 
738e560921SAneesh Kumar K.V /*
748e560921SAneesh Kumar K.V  *	if (pkey) {
758e560921SAneesh Kumar K.V  *
768e560921SAneesh Kumar K.V  *		save AMR -> stack;
778e560921SAneesh Kumar K.V  *		if (kuap) {
788e560921SAneesh Kumar K.V  *			if (AMR != BLOCKED)
798e560921SAneesh Kumar K.V  *				KUAP_BLOCKED -> AMR;
808e560921SAneesh Kumar K.V  *		}
818e560921SAneesh Kumar K.V  *		if (from_user) {
828e560921SAneesh Kumar K.V  *			save IAMR -> stack;
838e560921SAneesh Kumar K.V  *			if (kuep) {
 *			KUEP_BLOCKED -> IAMR
858e560921SAneesh Kumar K.V  *			}
868e560921SAneesh Kumar K.V  *		}
878e560921SAneesh Kumar K.V  *		return;
888e560921SAneesh Kumar K.V  *	}
898e560921SAneesh Kumar K.V  *
908e560921SAneesh Kumar K.V  *	if (kuap) {
918e560921SAneesh Kumar K.V  *		if (from_kernel) {
928e560921SAneesh Kumar K.V  *			save AMR -> stack;
938e560921SAneesh Kumar K.V  *			if (AMR != BLOCKED)
948e560921SAneesh Kumar K.V  *				KUAP_BLOCKED -> AMR;
958e560921SAneesh Kumar K.V  *		}
968e560921SAneesh Kumar K.V  *
978e560921SAneesh Kumar K.V  *	}
988e560921SAneesh Kumar K.V  */
993b47b754SAneesh Kumar K.V .macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
1008e560921SAneesh Kumar K.V #if defined(CONFIG_PPC_PKEY)
1018e560921SAneesh Kumar K.V 
1028e560921SAneesh Kumar K.V 	/*
1038e560921SAneesh Kumar K.V 	 * if both pkey and kuap is disabled, nothing to do
1048e560921SAneesh Kumar K.V 	 */
1058e560921SAneesh Kumar K.V 	BEGIN_MMU_FTR_SECTION_NESTED(68)
1068e560921SAneesh Kumar K.V 	b	100f  // skip_save_amr
1078e560921SAneesh Kumar K.V 	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)
1088e560921SAneesh Kumar K.V 
1098e560921SAneesh Kumar K.V 	/*
1108e560921SAneesh Kumar K.V 	 * if pkey is disabled and we are entering from userspace
1118e560921SAneesh Kumar K.V 	 * don't do anything.
1128e560921SAneesh Kumar K.V 	 */
1133b47b754SAneesh Kumar K.V 	BEGIN_MMU_FTR_SECTION_NESTED(67)
1143b47b754SAneesh Kumar K.V 	.ifnb \msr_pr_cr
1158e560921SAneesh Kumar K.V 	/*
1168e560921SAneesh Kumar K.V 	 * Without pkey we are not changing AMR outside the kernel
1178e560921SAneesh Kumar K.V 	 * hence skip this completely.
1188e560921SAneesh Kumar K.V 	 */
1198e560921SAneesh Kumar K.V 	bne	\msr_pr_cr, 100f  // from userspace
1203b47b754SAneesh Kumar K.V 	.endif
1218e560921SAneesh Kumar K.V         END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
1228e560921SAneesh Kumar K.V 
1238e560921SAneesh Kumar K.V 	/*
1248e560921SAneesh Kumar K.V 	 * pkey is enabled or pkey is disabled but entering from kernel
1258e560921SAneesh Kumar K.V 	 */
1263b47b754SAneesh Kumar K.V 	mfspr	\gpr1, SPRN_AMR
1273b47b754SAneesh Kumar K.V 	std	\gpr1, STACK_REGS_AMR(r1)
1288e560921SAneesh Kumar K.V 
1298e560921SAneesh Kumar K.V 	/*
1308e560921SAneesh Kumar K.V 	 * update kernel AMR with AMR_KUAP_BLOCKED only
1318e560921SAneesh Kumar K.V 	 * if KUAP feature is enabled
1328e560921SAneesh Kumar K.V 	 */
1338e560921SAneesh Kumar K.V 	BEGIN_MMU_FTR_SECTION_NESTED(69)
1348e560921SAneesh Kumar K.V 	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
1353b47b754SAneesh Kumar K.V 	cmpd	\use_cr, \gpr1, \gpr2
1368e560921SAneesh Kumar K.V 	beq	\use_cr, 102f
1378e560921SAneesh Kumar K.V 	/*
1388e560921SAneesh Kumar K.V 	 * We don't isync here because we very recently entered via an interrupt
1398e560921SAneesh Kumar K.V 	 */
1403b47b754SAneesh Kumar K.V 	mtspr	SPRN_AMR, \gpr2
1413b47b754SAneesh Kumar K.V 	isync
1428e560921SAneesh Kumar K.V 102:
1438e560921SAneesh Kumar K.V 	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)
1448e560921SAneesh Kumar K.V 
1458e560921SAneesh Kumar K.V 	/*
1468e560921SAneesh Kumar K.V 	 * if entering from kernel we don't need save IAMR
1478e560921SAneesh Kumar K.V 	 */
1488e560921SAneesh Kumar K.V 	.ifnb \msr_pr_cr
1498e560921SAneesh Kumar K.V 	beq	\msr_pr_cr, 100f // from kernel space
1508e560921SAneesh Kumar K.V 	mfspr	\gpr1, SPRN_IAMR
1518e560921SAneesh Kumar K.V 	std	\gpr1, STACK_REGS_IAMR(r1)
1528e560921SAneesh Kumar K.V 
1538e560921SAneesh Kumar K.V 	/*
1548e560921SAneesh Kumar K.V 	 * update kernel IAMR with AMR_KUEP_BLOCKED only
1558e560921SAneesh Kumar K.V 	 * if KUEP feature is enabled
1568e560921SAneesh Kumar K.V 	 */
1578e560921SAneesh Kumar K.V 	BEGIN_MMU_FTR_SECTION_NESTED(70)
1588e560921SAneesh Kumar K.V 	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
1598e560921SAneesh Kumar K.V 	mtspr	SPRN_IAMR, \gpr2
1608e560921SAneesh Kumar K.V 	isync
1618e560921SAneesh Kumar K.V 	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
1628e560921SAneesh Kumar K.V 	.endif
1638e560921SAneesh Kumar K.V 
1648e560921SAneesh Kumar K.V 100: // skip_save_amr
1653b47b754SAneesh Kumar K.V #endif
1663b47b754SAneesh Kumar K.V .endm
1673b47b754SAneesh Kumar K.V 
1683b47b754SAneesh Kumar K.V #else /* !__ASSEMBLY__ */
1693b47b754SAneesh Kumar K.V 
1703b47b754SAneesh Kumar K.V #include <linux/jump_label.h>
1713b47b754SAneesh Kumar K.V 
1723b47b754SAneesh Kumar K.V DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
1733b47b754SAneesh Kumar K.V 
1748e560921SAneesh Kumar K.V #ifdef CONFIG_PPC_PKEY
1753b47b754SAneesh Kumar K.V 
1763b47b754SAneesh Kumar K.V #include <asm/mmu.h>
1773b47b754SAneesh Kumar K.V #include <asm/ptrace.h>
1783b47b754SAneesh Kumar K.V 
17948a8ab4eSAneesh Kumar K.V /*
18048a8ab4eSAneesh Kumar K.V  * For kernel thread that doesn't have thread.regs return
18148a8ab4eSAneesh Kumar K.V  * default AMR/IAMR values.
18248a8ab4eSAneesh Kumar K.V  */
18348a8ab4eSAneesh Kumar K.V static inline u64 current_thread_amr(void)
18448a8ab4eSAneesh Kumar K.V {
18548a8ab4eSAneesh Kumar K.V 	if (current->thread.regs)
18648a8ab4eSAneesh Kumar K.V 		return current->thread.regs->amr;
18748a8ab4eSAneesh Kumar K.V 	return AMR_KUAP_BLOCKED;
18848a8ab4eSAneesh Kumar K.V }
18948a8ab4eSAneesh Kumar K.V 
19048a8ab4eSAneesh Kumar K.V static inline u64 current_thread_iamr(void)
19148a8ab4eSAneesh Kumar K.V {
19248a8ab4eSAneesh Kumar K.V 	if (current->thread.regs)
19348a8ab4eSAneesh Kumar K.V 		return current->thread.regs->iamr;
19448a8ab4eSAneesh Kumar K.V 	return AMR_KUEP_BLOCKED;
19548a8ab4eSAneesh Kumar K.V }
19648a8ab4eSAneesh Kumar K.V #endif /* CONFIG_PPC_PKEY */
19748a8ab4eSAneesh Kumar K.V 
19848a8ab4eSAneesh Kumar K.V #ifdef CONFIG_PPC_KUAP
19948a8ab4eSAneesh Kumar K.V 
2008e560921SAneesh Kumar K.V static inline void kuap_user_restore(struct pt_regs *regs)
2013b47b754SAneesh Kumar K.V {
2028e560921SAneesh Kumar K.V 	if (!mmu_has_feature(MMU_FTR_PKEY))
2038e560921SAneesh Kumar K.V 		return;
2048e560921SAneesh Kumar K.V 
2053b47b754SAneesh Kumar K.V 	isync();
2068e560921SAneesh Kumar K.V 	mtspr(SPRN_AMR, regs->amr);
2078e560921SAneesh Kumar K.V 	mtspr(SPRN_IAMR, regs->iamr);
2083b47b754SAneesh Kumar K.V 	/*
2098e560921SAneesh Kumar K.V 	 * No isync required here because we are about to rfi
2108e560921SAneesh Kumar K.V 	 * back to previous context before any user accesses
2118e560921SAneesh Kumar K.V 	 * would be made, which is a CSI.
2123b47b754SAneesh Kumar K.V 	 */
2133b47b754SAneesh Kumar K.V }
2148e560921SAneesh Kumar K.V static inline void kuap_kernel_restore(struct pt_regs *regs,
2158e560921SAneesh Kumar K.V 					   unsigned long amr)
2168e560921SAneesh Kumar K.V {
2178e560921SAneesh Kumar K.V 	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
2188e560921SAneesh Kumar K.V 		if (unlikely(regs->amr != amr)) {
2198e560921SAneesh Kumar K.V 			isync();
2208e560921SAneesh Kumar K.V 			mtspr(SPRN_AMR, regs->amr);
2218e560921SAneesh Kumar K.V 			/*
2228e560921SAneesh Kumar K.V 			 * No isync required here because we are about to rfi
2238e560921SAneesh Kumar K.V 			 * back to previous context before any user accesses
2248e560921SAneesh Kumar K.V 			 * would be made, which is a CSI.
2258e560921SAneesh Kumar K.V 			 */
2268e560921SAneesh Kumar K.V 		}
2278e560921SAneesh Kumar K.V 	}
2288e560921SAneesh Kumar K.V 	/*
2298e560921SAneesh Kumar K.V 	 * No need to restore IAMR when returning to kernel space.
2308e560921SAneesh Kumar K.V 	 */
2313b47b754SAneesh Kumar K.V }
2323b47b754SAneesh Kumar K.V 
2333b47b754SAneesh Kumar K.V static inline unsigned long kuap_get_and_check_amr(void)
2343b47b754SAneesh Kumar K.V {
235d5b810b5SAneesh Kumar K.V 	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
2363b47b754SAneesh Kumar K.V 		unsigned long amr = mfspr(SPRN_AMR);
2373b47b754SAneesh Kumar K.V 		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
2383b47b754SAneesh Kumar K.V 			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
2393b47b754SAneesh Kumar K.V 		return amr;
2403b47b754SAneesh Kumar K.V 	}
2413b47b754SAneesh Kumar K.V 	return 0;
2423b47b754SAneesh Kumar K.V }
2433b47b754SAneesh Kumar K.V 
2448e560921SAneesh Kumar K.V #else /* CONFIG_PPC_PKEY */
2458e560921SAneesh Kumar K.V 
/* PKEY disabled: nothing was saved on entry, so nothing to restore. */
static inline void kuap_user_restore(struct pt_regs *regs) { }
2498e560921SAneesh Kumar K.V 
/* PKEY disabled: nothing was saved on entry, so nothing to restore. */
static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
2538e560921SAneesh Kumar K.V 
/* PKEY disabled: no AMR state to read or check. */
static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0;
}
2588e560921SAneesh Kumar K.V 
2598e560921SAneesh Kumar K.V #endif /* CONFIG_PPC_PKEY */
2608e560921SAneesh Kumar K.V 
2618e560921SAneesh Kumar K.V 
2628e560921SAneesh Kumar K.V #ifdef CONFIG_PPC_KUAP
2638e560921SAneesh Kumar K.V 
2643b47b754SAneesh Kumar K.V static inline void kuap_check_amr(void)
2653b47b754SAneesh Kumar K.V {
266d5b810b5SAneesh Kumar K.V 	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
2673b47b754SAneesh Kumar K.V 		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
2683b47b754SAneesh Kumar K.V }
2693b47b754SAneesh Kumar K.V 
2703b47b754SAneesh Kumar K.V /*
2713b47b754SAneesh Kumar K.V  * We support individually allowing read or write, but we don't support nesting
2723b47b754SAneesh Kumar K.V  * because that would require an expensive read/modify write of the AMR.
2733b47b754SAneesh Kumar K.V  */
2743b47b754SAneesh Kumar K.V 
2753b47b754SAneesh Kumar K.V static inline unsigned long get_kuap(void)
2763b47b754SAneesh Kumar K.V {
2773b47b754SAneesh Kumar K.V 	/*
2783b47b754SAneesh Kumar K.V 	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
2793b47b754SAneesh Kumar K.V 	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
2803b47b754SAneesh Kumar K.V 	 * cause restore_user_access to do a flush.
2813b47b754SAneesh Kumar K.V 	 *
2823b47b754SAneesh Kumar K.V 	 * This has no effect in terms of actually blocking things on hash,
2833b47b754SAneesh Kumar K.V 	 * so it doesn't break anything.
2843b47b754SAneesh Kumar K.V 	 */
285d5b810b5SAneesh Kumar K.V 	if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
2863b47b754SAneesh Kumar K.V 		return AMR_KUAP_BLOCKED;
2873b47b754SAneesh Kumar K.V 
2883b47b754SAneesh Kumar K.V 	return mfspr(SPRN_AMR);
2893b47b754SAneesh Kumar K.V }
2903b47b754SAneesh Kumar K.V 
2913b47b754SAneesh Kumar K.V static inline void set_kuap(unsigned long value)
2923b47b754SAneesh Kumar K.V {
293d5b810b5SAneesh Kumar K.V 	if (!early_mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
2943b47b754SAneesh Kumar K.V 		return;
2953b47b754SAneesh Kumar K.V 
2963b47b754SAneesh Kumar K.V 	/*
2973b47b754SAneesh Kumar K.V 	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
2983b47b754SAneesh Kumar K.V 	 * before and after the move to AMR. See table 6 on page 1134.
2993b47b754SAneesh Kumar K.V 	 */
3003b47b754SAneesh Kumar K.V 	isync();
3013b47b754SAneesh Kumar K.V 	mtspr(SPRN_AMR, value);
3023b47b754SAneesh Kumar K.V 	isync();
3033b47b754SAneesh Kumar K.V }
3043b47b754SAneesh Kumar K.V 
305eb232b16SAneesh Kumar K.V #define RADIX_KUAP_BLOCK_READ	UL(0x4000000000000000)
306eb232b16SAneesh Kumar K.V #define RADIX_KUAP_BLOCK_WRITE	UL(0x8000000000000000)
307eb232b16SAneesh Kumar K.V 
308eb232b16SAneesh Kumar K.V static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
309eb232b16SAneesh Kumar K.V 				  bool is_write, unsigned long error_code)
3103b47b754SAneesh Kumar K.V {
311eb232b16SAneesh Kumar K.V 	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
312eb232b16SAneesh Kumar K.V 		return false;
313eb232b16SAneesh Kumar K.V 
314eb232b16SAneesh Kumar K.V 	if (radix_enabled()) {
315eb232b16SAneesh Kumar K.V 		/*
316eb232b16SAneesh Kumar K.V 		 * Will be a storage protection fault.
317eb232b16SAneesh Kumar K.V 		 * Only check the details of AMR[0]
318eb232b16SAneesh Kumar K.V 		 */
319eb232b16SAneesh Kumar K.V 		return WARN((regs->kuap & (is_write ? RADIX_KUAP_BLOCK_WRITE : RADIX_KUAP_BLOCK_READ)),
3203b47b754SAneesh Kumar K.V 			    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
3213b47b754SAneesh Kumar K.V 	}
322eb232b16SAneesh Kumar K.V 	/*
323eb232b16SAneesh Kumar K.V 	 * We don't want to WARN here because userspace can setup
324eb232b16SAneesh Kumar K.V 	 * keys such that a kernel access to user address can cause
325eb232b16SAneesh Kumar K.V 	 * fault
326eb232b16SAneesh Kumar K.V 	 */
327eb232b16SAneesh Kumar K.V 	return !!(error_code & DSISR_KEYFAULT);
328eb232b16SAneesh Kumar K.V }
3293b47b754SAneesh Kumar K.V 
/*
 * Open a user-access window for the requested direction (KUAP_READ,
 * KUAP_WRITE or KUAP_READ_WRITE).  With pkeys active, the task's own
 * AMR key bits are OR-ed in so user-defined keys still apply.
 */
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);	/* read-only window */
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);	/* write-only window */
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}
3503b47b754SAneesh Kumar K.V 
3518e560921SAneesh Kumar K.V #else /* CONFIG_PPC_KUAP */
3528e560921SAneesh Kumar K.V 
3538e560921SAneesh Kumar K.V static inline unsigned long get_kuap(void)
3548e560921SAneesh Kumar K.V {
3558e560921SAneesh Kumar K.V 	return AMR_KUAP_BLOCKED;
3568e560921SAneesh Kumar K.V }
3578e560921SAneesh Kumar K.V 
3588e560921SAneesh Kumar K.V static inline void set_kuap(unsigned long value) { }
3598e560921SAneesh Kumar K.V 
3608e560921SAneesh Kumar K.V static __always_inline void allow_user_access(void __user *to, const void __user *from,
3618e560921SAneesh Kumar K.V 					      unsigned long size, unsigned long dir)
3628e560921SAneesh Kumar K.V { }
3638e560921SAneesh Kumar K.V 
3648e560921SAneesh Kumar K.V #endif /* !CONFIG_PPC_KUAP */
3658e560921SAneesh Kumar K.V 
/*
 * Close the user-access window, then flush if the L1D uaccess-flush
 * mitigation is enabled.  The arguments are unused here; presumably
 * the signature is shared with other KUAP implementations — verify
 * against the other kup.h variants.
 */
static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}
3733b47b754SAneesh Kumar K.V 
3743b47b754SAneesh Kumar K.V static inline unsigned long prevent_user_access_return(void)
3753b47b754SAneesh Kumar K.V {
3763b47b754SAneesh Kumar K.V 	unsigned long flags = get_kuap();
3773b47b754SAneesh Kumar K.V 
3783b47b754SAneesh Kumar K.V 	set_kuap(AMR_KUAP_BLOCKED);
3793b47b754SAneesh Kumar K.V 	if (static_branch_unlikely(&uaccess_flush_key))
3803b47b754SAneesh Kumar K.V 		do_uaccess_flush();
3813b47b754SAneesh Kumar K.V 
3823b47b754SAneesh Kumar K.V 	return flags;
3833b47b754SAneesh Kumar K.V }
3843b47b754SAneesh Kumar K.V 
/*
 * Reinstate an AMR value previously returned by
 * prevent_user_access_return().  When that value was fully blocked and
 * the mitigation is enabled, flush — matching what prevent_user_access()
 * would have done.
 */
static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
3913b47b754SAneesh Kumar K.V #endif /* __ASSEMBLY__ */
3923b47b754SAneesh Kumar K.V 
3933b47b754SAneesh Kumar K.V #endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */
394