xref: /linux/arch/s390/include/asm/pgalloc.h (revision ead751507de86d90fa250431e9990a8b881f713c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *  S390 version
4  *    Copyright IBM Corp. 1999, 2000
5  *    Author(s): Hartmut Penner (hp@de.ibm.com)
6  *               Martin Schwidefsky (schwidefsky@de.ibm.com)
7  *
8  *  Derived from "include/asm-i386/pgalloc.h"
9  *    Copyright (C) 1994  Linus Torvalds
10  */
11 
12 #ifndef _S390_PGALLOC_H
13 #define _S390_PGALLOC_H
14 
15 #include <linux/threads.h>
16 #include <linux/gfp.h>
17 #include <linux/mm.h>
18 
19 #define CRST_ALLOC_ORDER 2
20 
21 unsigned long *crst_table_alloc(struct mm_struct *);
22 void crst_table_free(struct mm_struct *, unsigned long *);
23 
24 unsigned long *page_table_alloc(struct mm_struct *);
25 struct page *page_table_alloc_pgste(struct mm_struct *mm);
26 void page_table_free(struct mm_struct *, unsigned long *);
27 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
28 void page_table_free_pgste(struct page *page);
29 extern int page_table_allocate_pgste;
30 
/*
 * Fill the n-byte table at s with the 8-byte pattern val.
 *
 * Works in 256-byte chunks: the first longword of each chunk is stored
 * explicitly, then a single overlapping MVC copies bytes 0..247 to
 * offsets 8..255.  MVC moves byte-by-byte left to right, so the 8-byte
 * pattern is replicated through the rest of the chunk.
 */
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	/* dummy type covering exactly the 256 bytes one MVC clobbers */
	struct addrtype { char _[256]; };
	int i;

	for (i = 0; i < n; i += 256) {
		*s = val;	/* seed first entry of this 256-byte chunk */
		asm volatile(
			"mvc	8(248,%[s]),0(%[s])\n"
			: "+m" (*(struct addrtype *) s)
			: [s] "a" (s));
		s += 256 / sizeof(long);	/* next 256-byte chunk */
	}
}
45 
/* Initialize a region/segment (CRST) table: set every entry to @entry. */
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, _CRST_TABLE_SIZE);
}
50 
51 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
52 {
53 	if (mm->context.asce_limit <= _REGION3_SIZE)
54 		return _SEGMENT_ENTRY_EMPTY;
55 	if (mm->context.asce_limit <= _REGION2_SIZE)
56 		return _REGION3_ENTRY_EMPTY;
57 	if (mm->context.asce_limit <= _REGION1_SIZE)
58 		return _REGION2_ENTRY_EMPTY;
59 	return _REGION1_ENTRY_EMPTY;
60 }
61 
62 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
63 void crst_table_downgrade(struct mm_struct *);
64 
65 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
66 {
67 	unsigned long *table = crst_table_alloc(mm);
68 
69 	if (table)
70 		crst_table_init(table, _REGION2_ENTRY_EMPTY);
71 	return (p4d_t *) table;
72 }
73 #define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)
74 
75 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
76 {
77 	unsigned long *table = crst_table_alloc(mm);
78 	if (table)
79 		crst_table_init(table, _REGION3_ENTRY_EMPTY);
80 	return (pud_t *) table;
81 }
82 #define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
83 
84 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
85 {
86 	unsigned long *table = crst_table_alloc(mm);
87 
88 	if (!table)
89 		return NULL;
90 	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
91 	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
92 		crst_table_free(mm, table);
93 		return NULL;
94 	}
95 	return (pmd_t *) table;
96 }
97 
98 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
99 {
100 	pgtable_pmd_page_dtor(virt_to_page(pmd));
101 	crst_table_free(mm, (unsigned long *) pmd);
102 }
103 
/* Link a P4D table into a PGD slot as a region-first table entry. */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}
108 
/* Link a PUD table into a P4D slot as a region-second table entry. */
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}
113 
/* Link a PMD table into a PUD slot as a region-third table entry. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
118 
119 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
120 {
121 	unsigned long *table = crst_table_alloc(mm);
122 
123 	if (!table)
124 		return NULL;
125 	if (mm->context.asce_limit == _REGION3_SIZE) {
126 		/* Forking a compat process with 2 page table levels */
127 		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
128 			crst_table_free(mm, table);
129 			return NULL;
130 		}
131 	}
132 	return (pgd_t *) table;
133 }
134 
135 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
136 {
137 	if (mm->context.asce_limit == _REGION3_SIZE)
138 		pgtable_pmd_page_dtor(virt_to_page(pgd));
139 	crst_table_free(mm, (unsigned long *) pgd);
140 }
141 
/* Link a page table into a PMD slot as a segment table entry. */
static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

/* Kernel page tables are populated the same way as user ones. */
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

/* Extract the page table pointer from a pmd value by masking off the
 * low-order entry bits (table origin is pte-table-size aligned). */
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
152 
/*
 * page table entry allocation/free routines.
 * All variants go through page_table_alloc()/page_table_free();
 * the mm and vmaddr/pte arguments beyond the table pointer are unused.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
161 
162 extern void rcu_table_freelist_finish(void);
163 
164 void vmem_map_init(void);
165 void *vmem_crst_alloc(unsigned long val);
166 pte_t *vmem_pte_alloc(void);
167 
168 #endif /* _S390_PGALLOC_H */
169