1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * linux/arch/alpha/mm/init.c
4 *
5 * Copyright (C) 1995 Linus Torvalds
6 */
7
8 /* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
9
10 #include <linux/pagemap.h>
11 #include <linux/signal.h>
12 #include <linux/sched.h>
13 #include <linux/kernel.h>
14 #include <linux/errno.h>
15 #include <linux/string.h>
16 #include <linux/types.h>
17 #include <linux/ptrace.h>
18 #include <linux/mman.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 #include <linux/init.h>
22 #include <linux/memblock.h> /* max_low_pfn */
23 #include <linux/vmalloc.h>
24 #include <linux/gfp.h>
25
26 #include <linux/uaccess.h>
27 #include <asm/pgalloc.h>
28 #include <asm/hwrpb.h>
29 #include <asm/dma.h>
30 #include <asm/mmu_context.h>
31 #include <asm/console.h>
32 #include <asm/tlb.h>
33 #include <asm/setup.h>
34 #include <asm/sections.h>
35
36 #include "../kernel/proto.h"
37
/* Snapshot of the PCB that was live (the console's) when we switched to
   the kernel's page tables; filled in by switch_to_system_map() and
   restored by srm_paging_stop() for a clean return to the console.  */
static struct pcb_struct original_pcb;
39
40 pgd_t *
pgd_alloc(struct mm_struct * mm)41 pgd_alloc(struct mm_struct *mm)
42 {
43 pgd_t *ret, *init;
44
45 ret = __pgd_alloc(mm, 0);
46 init = pgd_offset(&init_mm, 0UL);
47 if (ret) {
48 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
49 memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
50 (PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
51 #else
52 pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
53 #endif
54
55 /* The last PGD entry is the VPTB self-map. */
56 pgd_val(ret[PTRS_PER_PGD-1])
57 = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
58 }
59 return ret;
60 }
61
62
/*
 * Make @pcb the active hardware PCB.
 *
 * The current stack pointer ($30) is snapshotted into pcb->ksp *before*
 * the context swap, so the new PCB resumes on the same kernel stack.
 * Returns the address of the previously active PCB as reported by
 * __reload_thread (may be a physical or KSEG address; the caller in
 * switch_to_system_map() normalizes it).
 */
static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}
70
/* Set up initial PCB, VPTB, and other such nicities.  Builds the
   kernel's own page tables, installs them via a fresh PCB, and stashes
   the console's old PCB so srm_paging_stop() can restore it later.  */

static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table. */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	/* Self-map entry: the PGD's own PFN in the high half, kernel
	   protections in the low half. */
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required. */
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		/* HWRPB contents are checksummed; keep it consistent. */
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it. */
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	/* Flush all stale translations now that the new tables are live. */
	tbia();

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here. */

	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}
113
/* Set to 1 once callback_init() has finished remapping the console
   callbacks; presumably consulted by the console-callback wrappers
   elsewhere — not visible from this file.  */
int callback_init_done;
115
/*
 * Remap the SRM console callbacks into vmalloc space and bootstrap the
 * kernel page tables.  Runs before the page allocator exists, so any
 * page-table pages needed are carved directly off @kernel_end; the
 * advanced end-of-kernel pointer is returned so the caller knows how
 * much memory was consumed.
 */
void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();		/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP.
		   Each VA is rebased from the console's old mapping
		   (crb->map[0].va) onto VMALLOC_START. */
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   In the case of not SRM, but not CONFIG_ALPHA_LARGE_VMALLOC,
	   we need to allocate the PGD we use for vmalloc before we start
	   forking other tasks.  */

	/* Round kernel_end up to a page boundary and claim two pages. */
	two_pages = (void *)
	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	/* Hook the two fresh pages in as the PMD and PTE levels that
	   cover VMALLOC_START. */
	pgd = pgd_offset_k(VMALLOC_START);
	p4d = p4d_offset(pgd, VMALLOC_START);
	pud = pud_offset(p4d, VMALLOC_START);
	pud_set(pud, (pmd_t *)two_pages);
	pmd = pmd_offset(pud, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long nr_pages = 0;
		unsigned long vaddr;
		unsigned long i, j;

		/* calculate needed size: total pages across all CRB
		   map entries */
		for (i = 0; i < crb->map_entries; ++i)
			nr_pages += crb->map[i].count;

		/* register the vm area so vmalloc won't later hand out
		   the same range */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&console_remap_vm, PAGE_SIZE);

		vaddr = (unsigned long)console_remap_vm.addr;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries.  */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer consoles (especially on larger
				   systems) may require more pages of
				   PTEs. Grab additional pages as needed. */
				if (pmd != pmd_offset(pud, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pud, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}
	}

	callback_init_done = 1;
	return kernel_end;
}
210
211 /*
212 * paging_init() sets up the memory map.
213 */
paging_init(void)214 void __init paging_init(void)
215 {
216 unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
217 unsigned long dma_pfn;
218
219 dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
220 max_pfn = max_low_pfn;
221
222 max_zone_pfn[ZONE_DMA] = dma_pfn;
223 max_zone_pfn[ZONE_NORMAL] = max_pfn;
224
225 /* Initialize mem_map[]. */
226 free_area_init(max_zone_pfn);
227
228 /* Initialize the kernel's ZERO_PGE. */
229 memset(absolute_pointer(ZERO_PGE), 0, PAGE_SIZE);
230 }
231
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
/*
 * Hand the MMU back to the SRM console: move the VPTB self-map to the
 * slot the console expects, point the hardware VPTB and the HWRPB back
 * at the console's address, then reload the console's saved PCB (and
 * with it, its page tables) captured in original_pcb.
 */
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it.  */
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000UL);
	hwrpb->vptb = 0x200000000UL;
	/* HWRPB contents are checksummed; keep it consistent. */
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use.  */
	load_PCB(&original_pcb);
	tbia();
}
#endif
248
/*
 * vm_flags (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED) -> PTE protection
 * bits.  Alpha expresses *denied* access with fault-on bits: each entry
 * sets _PAGE_FOR/_PAGE_FOW/_PAGE_FOE for exactly the read/write/execute
 * permissions that are absent.  _PAGE_P() builds the pteval for a
 * private mapping, _PAGE_S() for a VM_SHARED one.
 */
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= _PAGE_P(_PAGE_FOE | _PAGE_FOW |
								  _PAGE_FOR),
	[VM_READ]					= _PAGE_P(_PAGE_FOE | _PAGE_FOW),
	[VM_WRITE]					= _PAGE_P(_PAGE_FOE),
	[VM_WRITE | VM_READ]				= _PAGE_P(_PAGE_FOE),
	[VM_EXEC]					= _PAGE_P(_PAGE_FOW | _PAGE_FOR),
	[VM_EXEC | VM_READ]				= _PAGE_P(_PAGE_FOW),
	[VM_EXEC | VM_WRITE]				= _PAGE_P(0),
	[VM_EXEC | VM_WRITE | VM_READ]			= _PAGE_P(0),
	[VM_SHARED]					= _PAGE_S(_PAGE_FOE | _PAGE_FOW |
								  _PAGE_FOR),
	[VM_SHARED | VM_READ]				= _PAGE_S(_PAGE_FOE | _PAGE_FOW),
	[VM_SHARED | VM_WRITE]				= _PAGE_S(_PAGE_FOE),
	[VM_SHARED | VM_WRITE | VM_READ]		= _PAGE_S(_PAGE_FOE),
	[VM_SHARED | VM_EXEC]				= _PAGE_S(_PAGE_FOW | _PAGE_FOR),
	[VM_SHARED | VM_EXEC | VM_READ]			= _PAGE_S(_PAGE_FOW),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= _PAGE_S(0),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= _PAGE_S(0)
};
DECLARE_VM_GET_PAGE_PROT
270