/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

/*
 * Initialize the architecture-specific part of a new mm_struct.
 *
 * Sets up the page-table list/lock, gmap list and TLB-flush bookkeeping,
 * then derives the address-space-control element (ASCE) from the current
 * asce_limit:
 *   - 1UL << 42: forked 3-level task  -> region-third-table ASCE
 *   - 0:         fresh exec           -> limit raised to STACK_TOP_MAX,
 *                                        then same region-third ASCE
 *   - 1UL << 53: forked 4-level task  -> region-second-table ASCE
 *   - 1UL << 31: forked 2-level compat task -> segment-table ASCE
 * Always returns 0 (cannot fail).
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	spin_lock_init(&mm->context.list_lock);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.flush_count, 0);
	mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
	/* KVM guest-storage support: pgste allocation policy and key state */
	mm->context.alloc_pgste = page_table_allocate_pgste;
	mm->context.has_pgste = 0;
	mm->context.use_skey = 0;
#endif
	switch (mm->context.asce_limit) {
	case 1UL << 42:
		/*
		 * forked 3-level task, fall through to set new asce with new
		 * mm->pgd
		 */
	case 0:
		/* context created by exec, set asce limit to 4TB */
		mm->context.asce_limit = STACK_TOP_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
		break;
	case 1UL << 53:
		/* forked 4-level task, set new asce with new mm->pgd */
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		break;
	case 1UL << 31:
		/* forked 2-level compat task, set new asce with new mm->pgd */
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
		/* pgd_alloc() did not increase mm->nr_pmds */
		mm_inc_nr_pmds(mm);
	}
	/* Pre-fill the top-level table with invalid entries of the right type */
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}

/* No per-mm teardown needed on s390; page tables are freed elsewhere. */
#define destroy_context(mm)             do { } while (0)

/*
 * Publish mm's ASCE as the user ASCE in the lowcore.
 *
 * CR7 (secondary-space ASCE) is reloaded immediately only when the task is
 * running with the access-register user segment (mm_segment.ar4); otherwise
 * CIF_ASCE is set so the reload happens later — presumably on the return-to-
 * user path that checks this flag (not visible here; confirm in entry code).
 */
static inline void set_user_asce(struct mm_struct *mm)
{
	S390_lowcore.user_asce = mm->context.asce;
	if (current->thread.mm_segment.ar4)
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	set_cpu_flag(CIF_ASCE);
}

/*
 * Detach from the user address space: alias the user ASCE to the kernel
 * ASCE and load it into both CR1 (primary) and CR7 (secondary), so user
 * page tables are no longer reachable through either space.
 */
static inline void clear_user_asce(void)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

/*
 * Ensure CR1 holds the kernel ASCE (reload only if it differs, since a
 * control-register load is not cheap) and flag CIF_ASCE so the user ASCE
 * gets re-established before returning to user space.
 */
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE);
}

/*
 * Switch the CPU from address space 'prev' to 'next'.
 *
 * Only lowcore/bitmask bookkeeping is done here: the next mm's ASCE is
 * published, this CPU is added to next's attach mask and cpumask, and the
 * kernel ASCE is loaded into CR1/CR7 to drop the old user address space.
 * Ordering matters: the CPU is marked attached to 'next' before being
 * cleared from 'prev', so a concurrent TLB flusher never misses this CPU.
 * The actual user-ASCE reload is deferred (see finish_arch_post_lock_switch
 * and set_user_asce).
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	S390_lowcore.user_asce = next->context.asce;
	if (prev == next)
		return;
	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(next));
	/* Clear old ASCE by loading the kernel ASCE. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}

/*
 * Run after the scheduler's locks are dropped: with preemption disabled,
 * wait out any in-flight TLB flush operations (flush_count), perform a
 * deferred mm flush if one was requested while flushing was blocked, and
 * restore the task's user/kernel address-space mode via set_fs.
 */
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	load_kernel_asce();
	if (mm) {
		preempt_disable();
		/* Spin until concurrent flushers are done with this mm */
		while (atomic_read(&mm->context.flush_count))
			cpu_relax();

		if (mm->context.flush_mm)
			__tlb_flush_mm(mm);
		preempt_enable();
	}
	set_fs(current->thread.mm_segment);
}

/* No lazy-TLB or deactivate work required on this architecture. */
#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Activate a new address space for the current task: full switch_mm plus
 * an immediate user-ASCE publication (exec path, where the switch must
 * take effect right away rather than at the next return to user space).
 */
static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
	switch_mm(prev, next, current);
	set_user_asce(next);
}

/* Generic-mm hook: nothing to do on fork-time mmap duplication. */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

/* Generic-mm hook: nothing to do when an mm's mappings are torn down. */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

/* Generic-mm hook: no per-arch work on munmap. */
static inline void arch_unmap(struct mm_struct *mm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}

/* Generic-mm hook: no per-arch work when binprm sets up its stack vma. */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

/* No protection-key-style restrictions on s390: every access is allowed. */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

/* PTE-level counterpart of the above: no extra access restrictions. */
static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}
#endif /* __S390_MMU_CONTEXT_H */