13b47b754SAneesh Kumar K.V /* SPDX-License-Identifier: GPL-2.0 */
23b47b754SAneesh Kumar K.V #ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
33b47b754SAneesh Kumar K.V #define _ASM_POWERPC_BOOK3S_64_KUP_H
43b47b754SAneesh Kumar K.V
53b47b754SAneesh Kumar K.V #include <linux/const.h>
63b47b754SAneesh Kumar K.V #include <asm/reg.h>
73b47b754SAneesh Kumar K.V
8fa46c2faSAneesh Kumar K.V #define AMR_KUAP_BLOCK_READ UL(0x5455555555555555)
9fa46c2faSAneesh Kumar K.V #define AMR_KUAP_BLOCK_WRITE UL(0xa8aaaaaaaaaaaaaa)
10292f86c4SAneesh Kumar K.V #define AMR_KUEP_BLOCKED UL(0x5455555555555555)
113b47b754SAneesh Kumar K.V #define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
123b47b754SAneesh Kumar K.V
133b47b754SAneesh Kumar K.V #ifdef __ASSEMBLY__
143b47b754SAneesh Kumar K.V
/*
 * Restore the user AMR/IAMR saved in the pt_regs area of the stack
 * frame when returning to userspace.
 *
 * \gpr1, \gpr2: scratch GPRs, clobbered.
 *
 * Entirely skipped unless CONFIG_PPC_PKEY is built in and the
 * MMU_FTR_PKEY feature is enabled at runtime.
 */
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If kuap feature is not enabled, do the mtspr
	 * only if AMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If kuep feature is not enabled, do the mtspr
	 * only if IAMR value is different.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: // skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm
618e560921SAneesh Kumar K.V
/*
 * Restore the kernel AMR saved in the pt_regs area of the stack frame
 * when returning to a kernel context. Only does the (expensive) mtspr
 * when the saved value differs from the current register contents, and
 * only when the MMU_FTR_KUAP feature is enabled.
 *
 * \gpr1, \gpr2: scratch GPRs, clobbered.
 */
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see kuap_kernel_restore()
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67)
#endif
.endm
843b47b754SAneesh Kumar K.V
853b47b754SAneesh Kumar K.V #ifdef CONFIG_PPC_KUAP
/*
 * Debug assertion (CONFIG_PPC_KUAP_DEBUG only): trap, with a one-shot
 * WARN, if the AMR is not in the fully user-blocked state it must be
 * in while running in the kernel.
 *
 * \gpr1, \gpr2: scratch GPRs, clobbered.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67)
#endif
.endm
973b47b754SAneesh Kumar K.V #endif
983b47b754SAneesh Kumar K.V
/*
 * Interrupt-entry pseudo code for the macro below:
 *
 * if (pkey) {
 *
 *	save AMR -> stack;
 *	if (kuap) {
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *	if (from_user) {
 *		save IAMR -> stack;
 *		if (kuep) {
 *			KUEP_BLOCKED -> IAMR
 *		}
 *	}
 *	return;
 * }
 *
 * if (kuap) {
 *	if (from_kernel) {
 *		save AMR -> stack;
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *
 * }
 */
/*
 * Save AMR (and, on entry from userspace, IAMR) into the pt_regs area
 * of the stack frame and switch to the fully blocked kernel values.
 *
 * \gpr1, \gpr2:  scratch GPRs, clobbered.
 * \use_cr:       CR field used for the AMR comparison.
 * \msr_pr_cr:    optional CR field reflecting MSR[PR] (per the branches
 *                below, EQ set means entry from kernel, EQ clear means
 *                entry from userspace); when blank, entry is treated as
 *                being from the kernel.
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * if both pkey and kuap is disabled, nothing to do
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_KUAP, 68)

	/*
	 * if pkey is disabled and we are entering from userspace
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing AMR outside the kernel
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled or pkey is disabled but entering from kernel
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * update kernel AMR with AMR_KUAP_BLOCKED only
	 * if KUAP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an interrupt
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 69)

	/*
	 * if entering from kernel we don't need save IAMR
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f  // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * update kernel IAMR with AMR_KUEP_BLOCKED only
	 * if KUEP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm
1933b47b754SAneesh Kumar K.V
1943b47b754SAneesh Kumar K.V #else /* !__ASSEMBLY__ */
1953b47b754SAneesh Kumar K.V
1963b47b754SAneesh Kumar K.V #include <linux/jump_label.h>
1977eec97b3SBenjamin Gray #include <linux/sched.h>
1983b47b754SAneesh Kumar K.V
1993b47b754SAneesh Kumar K.V DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
2003b47b754SAneesh Kumar K.V
2018e560921SAneesh Kumar K.V #ifdef CONFIG_PPC_PKEY
2023b47b754SAneesh Kumar K.V
2038c511effSAneesh Kumar K.V extern u64 __ro_after_init default_uamor;
2048c511effSAneesh Kumar K.V extern u64 __ro_after_init default_amr;
2058c511effSAneesh Kumar K.V extern u64 __ro_after_init default_iamr;
2068c511effSAneesh Kumar K.V
2073b47b754SAneesh Kumar K.V #include <asm/mmu.h>
2083b47b754SAneesh Kumar K.V #include <asm/ptrace.h>
2093b47b754SAneesh Kumar K.V
2108c511effSAneesh Kumar K.V /* usage of kthread_use_mm() should inherit the
2118c511effSAneesh Kumar K.V * AMR value of the operating address space. But, the AMR value is
2128c511effSAneesh Kumar K.V * thread-specific and we inherit the address space and not thread
2138c511effSAneesh Kumar K.V * access restrictions. Because of this ignore AMR value when accessing
2148c511effSAneesh Kumar K.V * userspace via kernel thread.
21548a8ab4eSAneesh Kumar K.V */
current_thread_amr(void)216*eb52f66fSChristophe Leroy static __always_inline u64 current_thread_amr(void)
21748a8ab4eSAneesh Kumar K.V {
21848a8ab4eSAneesh Kumar K.V if (current->thread.regs)
21948a8ab4eSAneesh Kumar K.V return current->thread.regs->amr;
2208c511effSAneesh Kumar K.V return default_amr;
22148a8ab4eSAneesh Kumar K.V }
22248a8ab4eSAneesh Kumar K.V
current_thread_iamr(void)223*eb52f66fSChristophe Leroy static __always_inline u64 current_thread_iamr(void)
22448a8ab4eSAneesh Kumar K.V {
22548a8ab4eSAneesh Kumar K.V if (current->thread.regs)
22648a8ab4eSAneesh Kumar K.V return current->thread.regs->iamr;
2278c511effSAneesh Kumar K.V return default_iamr;
22848a8ab4eSAneesh Kumar K.V }
22948a8ab4eSAneesh Kumar K.V #endif /* CONFIG_PPC_PKEY */
23048a8ab4eSAneesh Kumar K.V
23148a8ab4eSAneesh Kumar K.V #ifdef CONFIG_PPC_KUAP
23248a8ab4eSAneesh Kumar K.V
kuap_user_restore(struct pt_regs * regs)233*eb52f66fSChristophe Leroy static __always_inline void kuap_user_restore(struct pt_regs *regs)
2343b47b754SAneesh Kumar K.V {
235ec0f9b98SAneesh Kumar K.V bool restore_amr = false, restore_iamr = false;
236ec0f9b98SAneesh Kumar K.V unsigned long amr, iamr;
237ec0f9b98SAneesh Kumar K.V
2388e560921SAneesh Kumar K.V if (!mmu_has_feature(MMU_FTR_PKEY))
2398e560921SAneesh Kumar K.V return;
2408e560921SAneesh Kumar K.V
2414589a2b7SChristophe Leroy if (!mmu_has_feature(MMU_FTR_KUAP)) {
242ec0f9b98SAneesh Kumar K.V amr = mfspr(SPRN_AMR);
243ec0f9b98SAneesh Kumar K.V if (amr != regs->amr)
244ec0f9b98SAneesh Kumar K.V restore_amr = true;
245ec0f9b98SAneesh Kumar K.V } else {
246ec0f9b98SAneesh Kumar K.V restore_amr = true;
247ec0f9b98SAneesh Kumar K.V }
248ec0f9b98SAneesh Kumar K.V
249ec0f9b98SAneesh Kumar K.V if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
250ec0f9b98SAneesh Kumar K.V iamr = mfspr(SPRN_IAMR);
251ec0f9b98SAneesh Kumar K.V if (iamr != regs->iamr)
252ec0f9b98SAneesh Kumar K.V restore_iamr = true;
253ec0f9b98SAneesh Kumar K.V } else {
254ec0f9b98SAneesh Kumar K.V restore_iamr = true;
255ec0f9b98SAneesh Kumar K.V }
256ec0f9b98SAneesh Kumar K.V
257ec0f9b98SAneesh Kumar K.V
258ec0f9b98SAneesh Kumar K.V if (restore_amr || restore_iamr) {
2593b47b754SAneesh Kumar K.V isync();
260ec0f9b98SAneesh Kumar K.V if (restore_amr)
2618e560921SAneesh Kumar K.V mtspr(SPRN_AMR, regs->amr);
262ec0f9b98SAneesh Kumar K.V if (restore_iamr)
2638e560921SAneesh Kumar K.V mtspr(SPRN_IAMR, regs->iamr);
264ec0f9b98SAneesh Kumar K.V }
2653b47b754SAneesh Kumar K.V /*
2668e560921SAneesh Kumar K.V * No isync required here because we are about to rfi
2678e560921SAneesh Kumar K.V * back to previous context before any user accesses
2688e560921SAneesh Kumar K.V * would be made, which is a CSI.
2693b47b754SAneesh Kumar K.V */
2703b47b754SAneesh Kumar K.V }
271ec0f9b98SAneesh Kumar K.V
__kuap_kernel_restore(struct pt_regs * regs,unsigned long amr)272*eb52f66fSChristophe Leroy static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
2738e560921SAneesh Kumar K.V {
274c252f384SChristophe Leroy if (likely(regs->amr == amr))
275c252f384SChristophe Leroy return;
276c252f384SChristophe Leroy
2778e560921SAneesh Kumar K.V isync();
2788e560921SAneesh Kumar K.V mtspr(SPRN_AMR, regs->amr);
2798e560921SAneesh Kumar K.V /*
2808e560921SAneesh Kumar K.V * No isync required here because we are about to rfi
2818e560921SAneesh Kumar K.V * back to previous context before any user accesses
2828e560921SAneesh Kumar K.V * would be made, which is a CSI.
283c252f384SChristophe Leroy *
2848e560921SAneesh Kumar K.V * No need to restore IAMR when returning to kernel space.
2858e560921SAneesh Kumar K.V */
2863b47b754SAneesh Kumar K.V }
2873b47b754SAneesh Kumar K.V
__kuap_get_and_assert_locked(void)288*eb52f66fSChristophe Leroy static __always_inline unsigned long __kuap_get_and_assert_locked(void)
2893b47b754SAneesh Kumar K.V {
2903b47b754SAneesh Kumar K.V unsigned long amr = mfspr(SPRN_AMR);
291c252f384SChristophe Leroy
2923b47b754SAneesh Kumar K.V if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
2933b47b754SAneesh Kumar K.V WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
2943b47b754SAneesh Kumar K.V return amr;
2953b47b754SAneesh Kumar K.V }
2961bec4adcSChristophe Leroy #define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
2973b47b754SAneesh Kumar K.V
2981bec4adcSChristophe Leroy /* __kuap_lock() not required, book3s/64 does that in ASM */
29942e03bc5SChristophe Leroy
3003b47b754SAneesh Kumar K.V /*
3013b47b754SAneesh Kumar K.V * We support individually allowing read or write, but we don't support nesting
3023b47b754SAneesh Kumar K.V * because that would require an expensive read/modify write of the AMR.
3033b47b754SAneesh Kumar K.V */
3043b47b754SAneesh Kumar K.V
get_kuap(void)305*eb52f66fSChristophe Leroy static __always_inline unsigned long get_kuap(void)
3063b47b754SAneesh Kumar K.V {
3073b47b754SAneesh Kumar K.V /*
3083b47b754SAneesh Kumar K.V * We return AMR_KUAP_BLOCKED when we don't support KUAP because
3093b47b754SAneesh Kumar K.V * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
3103b47b754SAneesh Kumar K.V * cause restore_user_access to do a flush.
3113b47b754SAneesh Kumar K.V *
3123b47b754SAneesh Kumar K.V * This has no effect in terms of actually blocking things on hash,
3133b47b754SAneesh Kumar K.V * so it doesn't break anything.
3143b47b754SAneesh Kumar K.V */
3154589a2b7SChristophe Leroy if (!mmu_has_feature(MMU_FTR_KUAP))
3163b47b754SAneesh Kumar K.V return AMR_KUAP_BLOCKED;
3173b47b754SAneesh Kumar K.V
3183b47b754SAneesh Kumar K.V return mfspr(SPRN_AMR);
3193b47b754SAneesh Kumar K.V }
3203b47b754SAneesh Kumar K.V
set_kuap(unsigned long value)32179299391SChristophe Leroy static __always_inline void set_kuap(unsigned long value)
3223b47b754SAneesh Kumar K.V {
3234589a2b7SChristophe Leroy if (!mmu_has_feature(MMU_FTR_KUAP))
3243b47b754SAneesh Kumar K.V return;
3253b47b754SAneesh Kumar K.V
3263b47b754SAneesh Kumar K.V /*
3273b47b754SAneesh Kumar K.V * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
3283b47b754SAneesh Kumar K.V * before and after the move to AMR. See table 6 on page 1134.
3293b47b754SAneesh Kumar K.V */
3303b47b754SAneesh Kumar K.V isync();
3313b47b754SAneesh Kumar K.V mtspr(SPRN_AMR, value);
3323b47b754SAneesh Kumar K.V isync();
3333b47b754SAneesh Kumar K.V }
3343b47b754SAneesh Kumar K.V
335*eb52f66fSChristophe Leroy static __always_inline bool
__bad_kuap_fault(struct pt_regs * regs,unsigned long address,bool is_write)336*eb52f66fSChristophe Leroy __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
3373b47b754SAneesh Kumar K.V {
338eb232b16SAneesh Kumar K.V /*
339475c8749SAneesh Kumar K.V * For radix this will be a storage protection fault (DSISR_PROTFAULT).
340475c8749SAneesh Kumar K.V * For hash this will be a key fault (DSISR_KEYFAULT)
341eb232b16SAneesh Kumar K.V */
342475c8749SAneesh Kumar K.V /*
343475c8749SAneesh Kumar K.V * We do have exception table entry, but accessing the
344475c8749SAneesh Kumar K.V * userspace results in fault. This could be because we
345475c8749SAneesh Kumar K.V * didn't unlock the AMR or access is denied by userspace
346475c8749SAneesh Kumar K.V * using a key value that blocks access. We are only interested
347475c8749SAneesh Kumar K.V * in catching the use case of accessing without unlocking
348475c8749SAneesh Kumar K.V * the AMR. Hence check for BLOCK_WRITE/READ against AMR.
349475c8749SAneesh Kumar K.V */
350475c8749SAneesh Kumar K.V if (is_write) {
3513dc12dfeSChristophe Leroy return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
3523b47b754SAneesh Kumar K.V }
3533dc12dfeSChristophe Leroy return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
354eb232b16SAneesh Kumar K.V }
3553b47b754SAneesh Kumar K.V
allow_user_access(void __user * to,const void __user * from,unsigned long size,unsigned long dir)3563b47b754SAneesh Kumar K.V static __always_inline void allow_user_access(void __user *to, const void __user *from,
3573b47b754SAneesh Kumar K.V unsigned long size, unsigned long dir)
3583b47b754SAneesh Kumar K.V {
3594d6c551eSAneesh Kumar K.V unsigned long thread_amr = 0;
3604d6c551eSAneesh Kumar K.V
3613b47b754SAneesh Kumar K.V // This is written so we can resolve to a single case at build time
3623b47b754SAneesh Kumar K.V BUILD_BUG_ON(!__builtin_constant_p(dir));
3634d6c551eSAneesh Kumar K.V
3644d6c551eSAneesh Kumar K.V if (mmu_has_feature(MMU_FTR_PKEY))
3654d6c551eSAneesh Kumar K.V thread_amr = current_thread_amr();
3664d6c551eSAneesh Kumar K.V
3673b47b754SAneesh Kumar K.V if (dir == KUAP_READ)
3684d6c551eSAneesh Kumar K.V set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
3693b47b754SAneesh Kumar K.V else if (dir == KUAP_WRITE)
3704d6c551eSAneesh Kumar K.V set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
3713b47b754SAneesh Kumar K.V else if (dir == KUAP_READ_WRITE)
3724d6c551eSAneesh Kumar K.V set_kuap(thread_amr);
3733b47b754SAneesh Kumar K.V else
3743b47b754SAneesh Kumar K.V BUILD_BUG();
3753b47b754SAneesh Kumar K.V }
3763b47b754SAneesh Kumar K.V
3778e560921SAneesh Kumar K.V #else /* CONFIG_PPC_KUAP */
3788e560921SAneesh Kumar K.V
/*
 * KUAP disabled: report the fully blocked pattern so that
 * restore_user_access() still performs the uaccess flush when the
 * mitigation is enabled (see the comment in the CONFIG_PPC_KUAP
 * version of get_kuap()).
 */
static __always_inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}
3838e560921SAneesh Kumar K.V
/* KUAP disabled: nothing to program into the AMR. */
static __always_inline void set_kuap(unsigned long value) { }
3858e560921SAneesh Kumar K.V
/* KUAP disabled: user access is never blocked, nothing to open. */
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }
3898e560921SAneesh Kumar K.V
3908e560921SAneesh Kumar K.V #endif /* !CONFIG_PPC_KUAP */
3918e560921SAneesh Kumar K.V
/*
 * Close the user access window: block all user reads and writes via
 * the AMR, then run the uaccess flush when that static key is enabled.
 * @dir is unused here because access is always blocked in both
 * directions.
 */
static __always_inline void prevent_user_access(unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}
3983b47b754SAneesh Kumar K.V
prevent_user_access_return(void)399*eb52f66fSChristophe Leroy static __always_inline unsigned long prevent_user_access_return(void)
4003b47b754SAneesh Kumar K.V {
4013b47b754SAneesh Kumar K.V unsigned long flags = get_kuap();
4023b47b754SAneesh Kumar K.V
4033b47b754SAneesh Kumar K.V set_kuap(AMR_KUAP_BLOCKED);
4043b47b754SAneesh Kumar K.V if (static_branch_unlikely(&uaccess_flush_key))
4053b47b754SAneesh Kumar K.V do_uaccess_flush();
4063b47b754SAneesh Kumar K.V
4073b47b754SAneesh Kumar K.V return flags;
4083b47b754SAneesh Kumar K.V }
4093b47b754SAneesh Kumar K.V
/*
 * Reinstate an AMR state previously returned by
 * prevent_user_access_return(). When the restored state is the
 * fully-blocked pattern, no access window is being reopened, so run
 * the uaccess flush if that static key is enabled.
 */
static __always_inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
4163b47b754SAneesh Kumar K.V #endif /* __ASSEMBLY__ */
4173b47b754SAneesh Kumar K.V
4183b47b754SAneesh Kumar K.V #endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */
419