/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 version
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>
#include <asm/ctlreg.h>
#include <asm/asce.h>
#include <asm-generic/mm_hooks.h>

#define init_new_context init_new_context
/*
 * Initialize the s390-specific part of a new mm's context: locking,
 * gmap bookkeeping, flush/attach state, and the address-space-control
 * element (ASCE).  The number of page table levels is derived from
 * asce_limit: a fresh exec context (asce_limit == 0) starts out as a
 * 3-level task, while forked tasks inherit their parent's limit.
 * Always returns 0.
 */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	unsigned long asce_type, init_entry;

	spin_lock_init(&mm->context.lock);
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.flush_count, 0);
	atomic_set(&mm->context.protected_count, 0);
	mm->context.gmap_asce = 0;
	mm->context.flush_mm = 0;
#if IS_ENABLED(CONFIG_KVM)
	mm->context.allow_cow_sharing = 1;
#endif
	switch (mm->context.asce_limit) {
	default:
		/*
		 * context created by exec, the value of asce_limit can
		 * only be zero in this case
		 */
		VM_BUG_ON(mm->context.asce_limit);
		/* continue as 3-level task */
		mm->context.asce_limit = _REGION2_SIZE;
		fallthrough;
	case _REGION2_SIZE:
		/* forked 3-level task */
		init_entry = _REGION3_ENTRY_EMPTY;
		asce_type = _ASCE_TYPE_REGION3;
		break;
	case TASK_SIZE_MAX:
		/* forked 5-level task */
		init_entry = _REGION1_ENTRY_EMPTY;
		asce_type = _ASCE_TYPE_REGION1;
		break;
	case _REGION1_SIZE:
		/* forked 4-level task */
		init_entry = _REGION2_ENTRY_EMPTY;
		asce_type = _ASCE_TYPE_REGION2;
		break;
	}
	/* Build the ASCE from the top-level page table and the chosen type */
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | asce_type;
	/* Fill the top-level table with empty entries of the matching level */
	crst_table_init((unsigned long *) mm->pgd, init_entry);
	return 0;
}

/*
 * Switch to the next mm with interrupts already disabled.  This only
 * records the new user ASCE in the lowcore and moves this CPU between
 * the two mms' cpu_attach_masks; CR1 and CR7 are loaded with the
 * invalid ASCE here, so no user address space is reachable until
 * finish_arch_post_lock_switch() installs the real ASCEs.
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
				      struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	/* init_mm has no user ASCE of its own; park the invalid one */
	if (next == &init_mm)
		get_lowcore()->user_asce = s390_invalid_asce;
	else
		get_lowcore()->user_asce.val = next->context.asce;
	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	/* Clear previous user-ASCE from CR1 and CR7 */
	local_ctl_load(1, &s390_invalid_asce);
	local_ctl_load(7, &s390_invalid_asce);
	if (prev != next)
		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
#define switch_mm_irqs_off switch_mm_irqs_off

/*
 * Generic switch_mm() entry point: same as switch_mm_irqs_off() but
 * callable with interrupts enabled.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

#define finish_arch_post_lock_switch finish_arch_post_lock_switch
/*
 * Second half of the mm switch, run after the context switch proper
 * (NOTE(review): presumably after the scheduler dropped its locks, per
 * the hook's name).  Waits for concurrent TLB flushers to finish,
 * publishes this CPU in mm_cpumask() and performs any pending lazy
 * flush, then installs the real ASCEs: CR1 gets the kernel or user
 * ASCE depending on TIF_ASCE_PRIMARY, CR7 always gets the user ASCE.
 */
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned long flags;

	/* Nothing to do for tasks without an mm */
	if (mm) {
		preempt_disable();
		/* Wait until in-flight flushes no longer target this mm */
		while (atomic_read(&mm->context.flush_count))
			cpu_relax();
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		__tlb_flush_mm_lazy(mm);
		preempt_enable();
	}
	/* Replace the invalid ASCEs set by switch_mm_irqs_off() */
	local_irq_save(flags);
	if (test_thread_flag(TIF_ASCE_PRIMARY))
		local_ctl_load(1, &get_lowcore()->kernel_asce);
	else
		local_ctl_load(1, &get_lowcore()->user_asce);
	local_ctl_load(7, &get_lowcore()->user_asce);
	local_irq_restore(flags);
}

#define activate_mm activate_mm
/*
 * Activate a new mm for the current task.  Unlike the switch_mm()/
 * finish_arch_post_lock_switch() pair, this attaches the CPU and loads
 * the real ASCEs into CR1/CR7 immediately.
 */
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	if (test_thread_flag(TIF_ASCE_PRIMARY))
		local_ctl_load(1, &get_lowcore()->kernel_asce);
	else
		local_ctl_load(1, &get_lowcore()->user_asce);
	local_ctl_load(7, &get_lowcore()->user_asce);
}

#include <asm-generic/mmu_context.h>

#endif /* __S390_MMU_CONTEXT_H */