Lines Matching +full:supervisor +full:- +full:level
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
13 #include <asm/asm-const.h>
46 #define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */
114 #define PP_RWXX 0 /* Supervisor read/write, User none */
115 #define PP_RWRX 1 /* Supervisor read/write, User read */
116 #define PP_RWRW 2 /* Supervisor read/write, User read/write */
117 #define PP_RXRX 3 /* Supervisor read, User read */
118 #define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */
192 return -1; in shift_to_mmu_psize()
211 return -1; in ap_to_shift()
285 lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); in __hpte_page_size()
315 * to use 4k pages when they create cache-inhibited mappings.
330 * The AVA field omits the low-order 23 bits of the 78 bits VA. in hpte_encode_avpn()
332 * low-order b of these bits are part of the byte offset in hpte_encode_avpn()
333 * into the virtual page and, if b < 23, the high-order in hpte_encode_avpn()
334 * 23-b of these bits are always used in selecting the in hpte_encode_avpn()
337 v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm); in hpte_encode_avpn()
366 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT)); in hpte_new_to_old_v()
379 hpte_v = be64_to_cpu(hptep->v); in hpte_get_old_v()
381 hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r)); in hpte_get_old_v()
413 return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT); in hpte_encode_r()
426 mask = (1ul << (s_shift - VPN_SHIFT)) - 1; in hpt_vpn()
427 return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask); in hpt_vpn()
441 mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1; in hpt_hash()
442 hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^ in hpt_hash()
443 ((vpn & mask) >> (shift - VPN_SHIFT)); in hpt_hash()
445 mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1; in hpt_hash()
446 vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT); in hpt_hash()
448 ((vpn & mask) >> (shift - VPN_SHIFT)) ; in hpt_hash()
488 return -1; in __hash_page_thp()
540 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
548 * The proto-VSIDs are then scrambled into real VSIDs with the
551 * VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
554 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
555 * Because the modulus is 2^n-1 we can compute it efficiently without
561 * 0, because a context id of 0 and an EA of 0 gives a proto-VSID of 0, which
580 #define ESID_BITS (VA_BITS - (SID_SHIFT + CONTEXT_BITS))
581 #define ESID_BITS_1T (VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
583 #define ESID_BITS_MASK ((1 << ESID_BITS) - 1)
584 #define ESID_BITS_1T_MASK ((1 << ESID_BITS_1T) - 1)
593 #define MAX_KERNEL_CTX_CNT (1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
604 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
606 * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
614 #define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
623 #define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)
628 * co-prime to vsid_modulus. We also need to make sure that number
633 * |-------+------------+----------------------+------------+-------------------|
635 * |-------+------------+----------------------+------------+-------------------|
637 * |-------+------------+----------------------+------------+-------------------|
639 * |-------+------------+----------------------+------------+-------------------|
641 * |-------+------------+----------------------+------------+--------------------|
643 * |-------+------------+----------------------+------------+--------------------|
645 * |-------+------------+----------------------+------------+--------------------|
647 * |-------+------------+----------------------+------------+--------------------|
650 #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
651 #define VSID_BITS_256M (VA_BITS - SID_SHIFT)
652 #define VSID_BITS_65_256M (65 - SID_SHIFT)
658 #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
659 #define VSID_BITS_1T (VA_BITS - SID_SHIFT_1T)
660 #define VSID_BITS_65_1T (65 - SID_SHIFT_1T)
670 #define TASK_SLICE_ARRAY_SZ(x) ((x)->hash_context->slb_addr_limit >> 41)
675 * For the sub-page protection option, we extend the PGD with one of
676 * these. Basically we have a 3-level tree, with the top level being
679 * four pages of sub-page protection words are stored in the low_prot
681 * Each page of sub-page protection words protects 1GB (4 bytes
682 * protects 64k). For the 3-level tree, each page of pointers then
691 #define SBP_L1_BITS (PAGE_SHIFT - 2)
692 #define SBP_L2_BITS (PAGE_SHIFT - 3)
739 * modulus (2^n-1) without a second multiply.
758 unsigned long vsid_modulus = ((1UL << vsid_bits) - 1); in vsid_scramble()
795 vsid_bits = va_bits - SID_SHIFT; in get_vsid()
801 vsid_bits = va_bits - SID_SHIFT_1T; in get_vsid()
811 * 0x00001 - [ 0xc000000000000000 - 0xc001ffffffffffff]
812 * 0x00002 - [ 0xc002000000000000 - 0xc003ffffffffffff]
813 * 0x00003 - [ 0xc004000000000000 - 0xc005ffffffffffff]
814 * 0x00004 - [ 0xc006000000000000 - 0xc007ffffffffffff]
818 * 0x00005 - [ 0xc008000000000000 - 0xc009ffffffffffff]
819 * 0x00006 - [ 0xc00a000000000000 - 0xc00bffffffffffff]
820 * 0x00007 - [ 0xc00c000000000000 - 0xc00dffffffffffff]
837 ctx = region_id + MAX_KERNEL_CTX_CNT - 1; in get_kernel_context()