xref: /linux/arch/s390/include/asm/mmu_context.h (revision a3cbcadfdfc330c28a45f06e8f92fd1d59aafa19)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
#include <asm-generic/mm_hooks.h>

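/*
 * Initialize the architecture specific part of a new mm_struct:
 * reset the s390 context fields, pick the ASCE type that matches
 * the address space limit (3-, 4- or 5-level) and initialize the
 * top-level region table accordingly.
 */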
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	unsigned long asce_type, init_entry;

	spin_lock_init(&mm->context.lock);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.flush_count, 0);
	atomic_set(&mm->context.is_protected, 0);
	mm->context.gmap_asce = 0;
	mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
	mm->context.alloc_pgste = page_table_allocate_pgste ||
		test_thread_flag(TIF_PGSTE) ||
		(current->mm && current->mm->context.alloc_pgste);
	mm->context.has_pgste = 0;
	mm->context.uses_skeys = 0;
	mm->context.uses_cmm = 0;
	mm->context.allow_gmap_hpage_1m = 0;
#endif
	switch (mm->context.asce_limit) {
	default:
		/*
		 * The context was created by exec; asce_limit can
		 * only be zero in this case.
		 */
		VM_BUG_ON(mm->context.asce_limit);
		/* continue as 3-level task */
		mm->context.asce_limit = _REGION2_SIZE;
		fallthrough;
	case _REGION2_SIZE:
		/* forked 3-level task */
		init_entry = _REGION3_ENTRY_EMPTY;
		asce_type = _ASCE_TYPE_REGION3;
		break;
	case TASK_SIZE_MAX:
		/* forked 5-level task */
		init_entry = _REGION1_ENTRY_EMPTY;
		asce_type = _ASCE_TYPE_REGION1;
		break;
	case _REGION1_SIZE:
		/* forked 4-level task */
		init_entry = _REGION2_ENTRY_EMPTY;
		asce_type = _ASCE_TYPE_REGION2;
		break;
	}
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | asce_type;
	crst_table_init((unsigned long *) mm->pgd, init_entry);
	return 0;
}

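/*
 * Switch the current CPU to the address space described by @next.
 * Only the lowcore copy of the user ASCE is updated here; CR7 is
 * loaded with the invalid ASCE, so user space accesses through the
 * new mm only become possible once finish_arch_post_lock_switch()
 * has installed the new user ASCE.
 */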
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
				      struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (next == &init_mm)
		S390_lowcore.user_asce = s390_invalid_asce;
	else
		S390_lowcore.user_asce = next->context.asce;
	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	/* Clear previous user-ASCE from CR7 */
	__ctl_load(s390_invalid_asce, 7, 7);
	if (prev != next)
		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
#define switch_mm_irqs_off switch_mm_irqs_off

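/*
 * Interrupt-safe wrapper around switch_mm_irqs_off() for callers
 * that do not already run with interrupts disabled.
 */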
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

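/*
 * Complete the mm switch after the scheduler has dropped the runqueue
 * lock: wait for concurrent TLB flushers to finish, attach the CPU to
 * the mm, perform any pending lazy TLB flush and finally activate the
 * user ASCE in CR7.
 */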
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	if (mm) {
		preempt_disable();
		while (atomic_read(&mm->context.flush_count))
			cpu_relax();
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		__tlb_flush_mm_lazy(mm);
		preempt_enable();
	}
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

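/*
 * Activate a new address space on the current CPU, e.g. during exec.
 * Unlike a regular context switch there is no following
 * finish_arch_post_lock_switch(), so the user ASCE is loaded into
 * CR7 immediately.
 */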
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

#include <asm-generic/mmu_context.h>

#endif /* __S390_MMU_CONTEXT_H */