/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H
#define _ASM_POWERPC_NOHASH_32_PTE_44x_H
#ifdef __KERNEL__

/*
 * Definitions for PPC440
 *
 * Because of the 3 word TLB entries to support 36-bit addressing,
 * the attributes are difficult to map in such a fashion that they
 * are easily loaded during exception processing. I decided to
 * organize the entry so the ERPN is the only portion in the
 * upper word of the PTE and the attribute bits below are packed
 * in as sensibly as they can be in the area below a 4KB page size
 * oriented RPN. This at least makes it easy to load the RPN and
 * ERPN fields in the TLB. -Matt
 *
 * This isn't entirely true anymore, at least some bits are now
 * easier to move into the TLB from the PTE. -BenH.
 *
 * Note that these bits preclude future use of a page size
 * less than 4KB.
 *
 *
 * PPC 440 core has the following TLB attribute fields;
 *
 *   TLB1:
 *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 *   RPN.................................  -  -  -  -  -  - ERPN.......
 *
 *   TLB2:
 *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 *   -  -  -  -  -    - U0 U1 U2 U3 W  I  M  G  E   - UX UW UR SX SW SR
 *
 * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
 * TLB2 storage attribute fields. Those are:
 *
 *   TLB2:
 *   0...10    11   12   13   14   15   16...31
 *   no change WL1  IL1I IL1D IL2I IL2D  no change
 *
 * There are some constraints and options, to decide mapping software bits
 * into TLB entry.
 *
 *   - PRESENT *must* be in the bottom three bits because swap cache
 *     entries use the top 29 bits for TLB2.
 *
 *   - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
 *     because it doesn't support SMP. However, some later 460 variants
 *     have -some- form of SMP support and so I keep the bit there for
 *     future use
 *
 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
 * for memory protection related functions (see PTE structure in
 * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
 * above bits. Note that the bit values are CPU specific, not architecture
 * specific.
 *
 * The kernel PTE entry holds an arch-dependent swp_entry structure under
 * certain situations. In other words, in such situations some portion of
 * the PTE bits are used as a swp_entry. In the PPC implementation, the
 * 3-24th LSB are shared with swp_entry, however the 0-2nd three LSB still
 * hold protection values. That means the three protection bits are
 * reserved for both PTE and SWAP entry at the most significant three
 * LSBs.
 *
 * There are three protection bits available for SWAP entry:
 *	_PAGE_PRESENT
 *	_PAGE_HASHPTE (if HW has)
 *
 * So those three bits have to be inside of 0-2nd LSB of PTE.
 *
 */

#define _PAGE_PRESENT	0x00000001	/* S: PTE valid */
#define _PAGE_RW	0x00000002	/* S: Write permission */
#define _PAGE_EXEC	0x00000004	/* H: Execute permission */
#define _PAGE_ACCESSED	0x00000008	/* S: Page referenced */
#define _PAGE_DIRTY	0x00000010	/* S: Page dirty */
#define _PAGE_SPECIAL	0x00000020	/* S: Special page */
#define _PAGE_USER	0x00000040	/* S: User page */
#define _PAGE_ENDIAN	0x00000080	/* H: E bit */
#define _PAGE_GUARDED	0x00000100	/* H: G bit */
#define _PAGE_COHERENT	0x00000200	/* H: M bit */
#define _PAGE_NO_CACHE	0x00000400	/* H: I bit */
#define _PAGE_WRITETHRU	0x00000800	/* H: W bit */

/* No page size encoding in the linux PTE */
#define _PAGE_PSIZE		0

#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	_PAGE_EXEC
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

/* Mask of bits returned by pte_pgprot() */
#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU | _PAGE_USER | _PAGE_ACCESSED | \
			 _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC)

/* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT	0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD	(~PAGE_MASK)
#define _PMD_USER	0

/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK	0xffffffff00000000ULL

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#if defined(CONFIG_SMP)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
#else
#define _PAGE_BASE	(_PAGE_BASE_NC)
#endif

/* Permission masks used to generate the __P and __S table */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */